genai-lite 0.4.0 → 0.4.1

This diff compares the contents of publicly available package versions as published to their public registry, and is provided for informational purposes only.
Files changed (59)
  1. package/README.md +47 -37
  2. package/dist/llm/LLMService.d.ts +29 -2
  3. package/dist/llm/LLMService.js +67 -36
  4. package/dist/llm/config.js +4 -4
  5. package/dist/llm/services/SettingsManager.js +17 -11
  6. package/dist/llm/types.d.ts +81 -22
  7. package/dist/prompting/parser.d.ts +2 -2
  8. package/dist/prompting/parser.js +2 -2
  9. package/package.json +1 -1
  10. package/dist/llm/LLMService.createMessages.test.d.ts +0 -4
  11. package/dist/llm/LLMService.createMessages.test.js +0 -364
  12. package/dist/llm/LLMService.original.d.ts +0 -147
  13. package/dist/llm/LLMService.original.js +0 -656
  14. package/dist/llm/LLMService.prepareMessage.test.d.ts +0 -1
  15. package/dist/llm/LLMService.prepareMessage.test.js +0 -303
  16. package/dist/llm/LLMService.presets.test.d.ts +0 -1
  17. package/dist/llm/LLMService.presets.test.js +0 -210
  18. package/dist/llm/LLMService.sendMessage.preset.test.d.ts +0 -1
  19. package/dist/llm/LLMService.sendMessage.preset.test.js +0 -153
  20. package/dist/llm/LLMService.test.d.ts +0 -1
  21. package/dist/llm/LLMService.test.js +0 -639
  22. package/dist/llm/clients/AnthropicClientAdapter.test.d.ts +0 -1
  23. package/dist/llm/clients/AnthropicClientAdapter.test.js +0 -273
  24. package/dist/llm/clients/GeminiClientAdapter.test.d.ts +0 -1
  25. package/dist/llm/clients/GeminiClientAdapter.test.js +0 -405
  26. package/dist/llm/clients/LlamaCppClientAdapter.test.d.ts +0 -1
  27. package/dist/llm/clients/LlamaCppClientAdapter.test.js +0 -447
  28. package/dist/llm/clients/LlamaCppServerClient.test.d.ts +0 -1
  29. package/dist/llm/clients/LlamaCppServerClient.test.js +0 -294
  30. package/dist/llm/clients/MockClientAdapter.test.d.ts +0 -1
  31. package/dist/llm/clients/MockClientAdapter.test.js +0 -250
  32. package/dist/llm/clients/OpenAIClientAdapter.test.d.ts +0 -1
  33. package/dist/llm/clients/OpenAIClientAdapter.test.js +0 -258
  34. package/dist/llm/clients/adapterErrorUtils.test.d.ts +0 -1
  35. package/dist/llm/clients/adapterErrorUtils.test.js +0 -123
  36. package/dist/llm/config.test.d.ts +0 -1
  37. package/dist/llm/config.test.js +0 -176
  38. package/dist/llm/services/AdapterRegistry.test.d.ts +0 -1
  39. package/dist/llm/services/AdapterRegistry.test.js +0 -239
  40. package/dist/llm/services/ModelResolver.test.d.ts +0 -1
  41. package/dist/llm/services/ModelResolver.test.js +0 -179
  42. package/dist/llm/services/PresetManager.test.d.ts +0 -1
  43. package/dist/llm/services/PresetManager.test.js +0 -210
  44. package/dist/llm/services/RequestValidator.test.d.ts +0 -1
  45. package/dist/llm/services/RequestValidator.test.js +0 -159
  46. package/dist/llm/services/SettingsManager.test.d.ts +0 -1
  47. package/dist/llm/services/SettingsManager.test.js +0 -266
  48. package/dist/prompting/builder.d.ts +0 -38
  49. package/dist/prompting/builder.js +0 -63
  50. package/dist/prompting/builder.test.d.ts +0 -4
  51. package/dist/prompting/builder.test.js +0 -109
  52. package/dist/prompting/content.test.d.ts +0 -4
  53. package/dist/prompting/content.test.js +0 -212
  54. package/dist/prompting/parser.test.d.ts +0 -4
  55. package/dist/prompting/parser.test.js +0 -464
  56. package/dist/prompting/template.test.d.ts +0 -1
  57. package/dist/prompting/template.test.js +0 -250
  58. package/dist/providers/fromEnvironment.test.d.ts +0 -1
  59. package/dist/providers/fromEnvironment.test.js +0 -59
package/dist/prompting/parser.d.ts CHANGED
@@ -103,7 +103,7 @@ export declare function parseRoleTags(template: string): Array<{
  * {
  *   "settings": {
  *     "temperature": 0.9,
- *     "thinkingExtraction": { "enabled": true, "tag": "reasoning" }
+ *     "thinkingTagFallback": { "enabled": true, "tagName": "reasoning" }
  *   }
  * }
  * </META>
@@ -112,7 +112,7 @@ export declare function parseRoleTags(template: string): Array<{
  * `;
  *
  * const { metadata, content } = parseTemplateWithMetadata(template);
- * // metadata.settings will contain the temperature and thinkingExtraction settings
+ * // metadata.settings will contain the temperature and thinkingTagFallback settings
  * // content will contain the SYSTEM and USER tags
  */
 export declare function parseTemplateWithMetadata(template: string): {
package/dist/prompting/parser.js CHANGED
@@ -146,7 +146,7 @@ function parseRoleTags(template) {
  * {
  *   "settings": {
  *     "temperature": 0.9,
- *     "thinkingExtraction": { "enabled": true, "tag": "reasoning" }
+ *     "thinkingTagFallback": { "enabled": true, "tagName": "reasoning" }
  *   }
  * }
  * </META>
@@ -155,7 +155,7 @@ function parseRoleTags(template) {
  * `;
  *
  * const { metadata, content } = parseTemplateWithMetadata(template);
- * // metadata.settings will contain the temperature and thinkingExtraction settings
+ * // metadata.settings will contain the temperature and thinkingTagFallback settings
  * // content will contain the SYSTEM and USER tags
  */
 function parseTemplateWithMetadata(template) {
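
The four hunks above all track one rename of the template-metadata setting: thinkingExtraction (with its tag field) becomes thinkingTagFallback (with tagName). A minimal sketch of the renamed setting in use, assuming parseTemplateWithMetadata is re-exported from the package root (the built file itself is dist/prompting/parser.js):

import { parseTemplateWithMetadata } from 'genai-lite'; // assumed root re-export

const template = `<META>
{
  "settings": {
    "temperature": 0.9,
    "thinkingTagFallback": { "enabled": true, "tagName": "reasoning" }
  }
}
</META>
<SYSTEM>You are a creative writer.</SYSTEM>
<USER>Write a story about {{topic}}</USER>`;

const { metadata, content } = parseTemplateWithMetadata(template);
// metadata.settings: { temperature: 0.9, thinkingTagFallback: { enabled: true, tagName: 'reasoning' } }
// content: the remaining <SYSTEM>/<USER> portion of the template
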
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "genai-lite",
-  "version": "0.4.0",
+  "version": "0.4.1",
   "description": "A lightweight, portable toolkit for interacting with various Generative AI APIs.",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
package/dist/llm/LLMService.createMessages.test.d.ts DELETED
@@ -1,4 +0,0 @@
-/**
- * Tests for LLMService.createMessages method
- */
-export {};
package/dist/llm/LLMService.createMessages.test.js DELETED
@@ -1,364 +0,0 @@
-"use strict";
-/**
- * Tests for LLMService.createMessages method
- */
-Object.defineProperty(exports, "__esModule", { value: true });
-const LLMService_1 = require("./LLMService");
-// Create a mock API key provider
-const mockApiKeyProvider = async () => 'test-api-key';
-describe('LLMService.createMessages', () => {
-    let service;
-    beforeEach(() => {
-        service = new LLMService_1.LLMService(mockApiKeyProvider);
-    });
-    describe('Basic template parsing', () => {
-        it('should parse simple template without model context', async () => {
-            const result = await service.createMessages({
-                template: 'Hello, how can I help you?'
-            });
-            expect(result.messages).toEqual([
-                { role: 'user', content: 'Hello, how can I help you?' }
-            ]);
-            expect(result.modelContext).toBeNull();
-        });
-        it('should parse multi-turn template without model context', async () => {
-            const result = await service.createMessages({
-                template: `
-<SYSTEM>You are a helpful assistant.</SYSTEM>
-<USER>Hello</USER>
-<ASSISTANT>Hi! How can I help you today?</ASSISTANT>
-<USER>Can you explain {{topic}}?</USER>
-`,
-                variables: { topic: 'promises in JavaScript' }
-            });
-            expect(result.messages).toEqual([
-                { role: 'system', content: 'You are a helpful assistant.' },
-                { role: 'user', content: 'Hello' },
-                { role: 'assistant', content: 'Hi! How can I help you today?' },
-                { role: 'user', content: 'Can you explain promises in JavaScript?' }
-            ]);
-            expect(result.modelContext).toBeNull();
-        });
-    });
-    describe('Model-aware templates', () => {
-        it('should inject model context for valid preset', async () => {
-            const result = await service.createMessages({
-                template: `
-<SYSTEM>You are a {{ thinking_enabled ? "thoughtful" : "standard" }} assistant.</SYSTEM>
-<USER>Help me understand {{concept}}</USER>
-`,
-                variables: { concept: 'recursion' },
-                presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking'
-            });
-            expect(result.modelContext).not.toBeNull();
-            expect(result.modelContext?.thinking_enabled).toBe(true);
-            expect(result.modelContext?.thinking_available).toBe(true);
-            expect(result.modelContext?.model_id).toBe('claude-3-7-sonnet-20250219');
-            expect(result.modelContext?.provider_id).toBe('anthropic');
-            expect(result.messages).toEqual([
-                { role: 'system', content: 'You are a thoughtful assistant.' },
-                { role: 'user', content: 'Help me understand recursion' }
-            ]);
-        });
-        it('should inject model context for valid provider/model combo', async () => {
-            const result = await service.createMessages({
-                template: 'Model: {{model_id}}, Provider: {{provider_id}}',
-                providerId: 'openai',
-                modelId: 'gpt-4.1'
-            });
-            expect(result.modelContext).not.toBeNull();
-            expect(result.modelContext?.model_id).toBe('gpt-4.1');
-            expect(result.modelContext?.provider_id).toBe('openai');
-            expect(result.messages).toEqual([
-                { role: 'user', content: 'Model: gpt-4.1, Provider: openai' }
-            ]);
-        });
-        it('should handle model without reasoning support', async () => {
-            const result = await service.createMessages({
-                template: 'Thinking available: {{thinking_available}}, enabled: {{thinking_enabled}}',
-                presetId: 'openai-gpt-4.1-default'
-            });
-            expect(result.modelContext?.thinking_available).toBe(false);
-            expect(result.modelContext?.thinking_enabled).toBe(false);
-        });
-    });
-    describe('Complex scenarios', () => {
-        it('should handle variables that inject role tags', async () => {
-            const result = await service.createMessages({
-                template: `
-<SYSTEM>Base system prompt</SYSTEM>
-{{extraMessages}}
-<USER>Final question</USER>
-`,
-                variables: {
-                    extraMessages: '<USER>First question</USER>\n<ASSISTANT>First answer</ASSISTANT>'
-                }
-            });
-            expect(result.messages).toEqual([
-                { role: 'system', content: 'Base system prompt' },
-                { role: 'user', content: 'First question' },
-                { role: 'assistant', content: 'First answer' },
-                { role: 'user', content: 'Final question' }
-            ]);
-        });
-        it('should handle conditional role injection based on model context', async () => {
-            const result = await service.createMessages({
-                template: `
-{{ thinking_enabled ? '<SYSTEM>Think step-by-step before answering.</SYSTEM>' : '' }}
-<USER>Solve: {{problem}}</USER>
-`,
-                variables: { problem: 'What is 15% of 240?' },
-                presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking'
-            });
-            expect(result.messages).toEqual([
-                { role: 'system', content: 'Think step-by-step before answering.' },
-                { role: 'user', content: 'Solve: What is 15% of 240?' }
-            ]);
-        });
-        it('should handle nested conditionals with model context', async () => {
-            const result = await service.createMessages({
-                template: `
-<SYSTEM>
-You are using {{ model_id || "no model" }}.
-{{ thinking_available ? 'You have reasoning capabilities.' : 'Standard model.' }}
-</SYSTEM>
-<USER>Hello</USER>
-`,
-                providerId: 'anthropic',
-                modelId: 'claude-3-5-haiku-20241022'
-            });
-            expect(result.messages[0].role).toBe('system');
-            // Check that we have model context
-            expect(result.modelContext).not.toBeNull();
-            if (result.modelContext) {
-                expect(result.modelContext.model_id).toBe('claude-3-5-haiku-20241022');
-                expect(result.modelContext.thinking_available).toBe(false);
-            }
-        });
-    });
-    describe('Error handling', () => {
-        it('should proceed without model context on invalid preset', async () => {
-            const result = await service.createMessages({
-                template: 'Has model context: {{model_id ? "yes" : "no"}}',
-                presetId: 'invalid-preset-id'
-            });
-            expect(result.modelContext).toBeNull();
-            expect(result.messages).toEqual([
-                { role: 'user', content: 'Has model context: no' }
-            ]);
-        });
-        it('should handle invalid template syntax gracefully', async () => {
-            const result = await service.createMessages({
-                template: 'Unclosed conditional: {{ if true',
-                variables: {}
-            });
-            // The template engine doesn't throw errors for invalid syntax, it renders as-is
-            expect(result.messages).toEqual([
-                { role: 'user', content: 'Unclosed conditional: {{ if true' }
-            ]);
-        });
-        it('should handle empty template', async () => {
-            const result = await service.createMessages({
-                template: ''
-            });
-            expect(result.messages).toEqual([]);
-            expect(result.modelContext).toBeNull();
-        });
-        it('should handle whitespace-only template', async () => {
-            const result = await service.createMessages({
-                template: ' \n\t '
-            });
-            expect(result.messages).toEqual([]);
-            expect(result.modelContext).toBeNull();
-        });
-    });
-    describe('Integration with reasoning settings', () => {
-        it('should handle reasoning settings in model context', async () => {
-            const result = await service.createMessages({
-                template: `
-Thinking enabled: {{thinking_enabled}}
-Thinking available: {{thinking_available}}
-Model: {{model_id}}
-`,
-                presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking'
-            });
-            expect(result.modelContext?.thinking_enabled).toBe(true);
-            expect(result.modelContext?.thinking_available).toBe(true);
-            expect(result.messages[0].content).toContain('Thinking enabled: true');
-            expect(result.messages[0].content).toContain('Thinking available: true');
-        });
-        it('should handle models with always-on reasoning', async () => {
-            const result = await service.createMessages({
-                template: 'Provider: {{provider_id}}, Model: {{model_id}}',
-                providerId: 'gemini',
-                modelId: 'gemini-2.5-pro'
-            });
-            expect(result.modelContext).not.toBeNull();
-            expect(result.modelContext?.provider_id).toBe('gemini');
-            expect(result.modelContext?.model_id).toBe('gemini-2.5-pro');
-        });
-    });
-    describe('Variable precedence', () => {
-        it('should allow user variables to override model context', async () => {
-            const result = await service.createMessages({
-                template: 'Model: {{model_id}}',
-                variables: { model_id: 'user-override' },
-                presetId: 'openai-gpt-4.1-default'
-            });
-            expect(result.messages).toEqual([
-                { role: 'user', content: 'Model: user-override' }
-            ]);
-        });
-        it('should merge variables correctly', async () => {
-            const result = await service.createMessages({
-                template: 'Model: {{model_id}}, Task: {{task}}, Thinking: {{thinking_enabled}}',
-                variables: { task: 'code review' },
-                presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking'
-            });
-            expect(result.messages[0].content).toBe('Model: claude-3-7-sonnet-20250219, Task: code review, Thinking: true');
-        });
-    });
-    describe('Template metadata parsing', () => {
-        it('should extract settings from META block', async () => {
-            const result = await service.createMessages({
-                template: `<META>
-{
-"settings": {
-"temperature": 0.9,
-"thinkingExtraction": { "enabled": true, "tag": "reasoning" }
-}
-}
-</META>
-<SYSTEM>You are a creative writer.</SYSTEM>
-<USER>Write a story about {{topic}}</USER>`,
-                variables: { topic: 'a robot discovering music' }
-            });
-            expect(result.messages).toEqual([
-                { role: 'system', content: 'You are a creative writer.' },
-                { role: 'user', content: 'Write a story about a robot discovering music' }
-            ]);
-            expect(result.settings).toEqual({
-                temperature: 0.9,
-                thinkingExtraction: { enabled: true, tag: 'reasoning' }
-            });
-        });
-        it('should return empty settings when no META block exists', async () => {
-            const result = await service.createMessages({
-                template: '<USER>Simple message</USER>'
-            });
-            expect(result.messages).toEqual([
-                { role: 'user', content: 'Simple message' }
-            ]);
-            expect(result.settings).toEqual({});
-        });
-        it('should handle invalid settings in META block with warnings', async () => {
-            const consoleWarnSpy = jest.spyOn(console, 'warn').mockImplementation();
-            const result = await service.createMessages({
-                template: `<META>
-{
-"settings": {
-"temperature": 3.0,
-"unknownSetting": "value",
-"maxTokens": -50
-}
-}
-</META>
-<USER>Test</USER>`
-            });
-            expect(result.messages).toEqual([
-                { role: 'user', content: 'Test' }
-            ]);
-            expect(result.settings).toEqual({}); // All settings were invalid
-            expect(consoleWarnSpy).toHaveBeenCalledWith(expect.stringContaining('Invalid temperature value'));
-            expect(consoleWarnSpy).toHaveBeenCalledWith(expect.stringContaining('Unknown setting "unknownSetting"'));
-            expect(consoleWarnSpy).toHaveBeenCalledWith(expect.stringContaining('Invalid maxTokens value'));
-            consoleWarnSpy.mockRestore();
-        });
-        it('should work with model context and META settings', async () => {
-            const result = await service.createMessages({
-                template: `<META>
-{
-"settings": {
-"temperature": 0.7,
-"maxTokens": 2000
-}
-}
-</META>
-<SYSTEM>You are a {{ thinking_enabled ? "thoughtful" : "quick" }} assistant.</SYSTEM>
-<USER>Help me understand {{concept}}</USER>`,
-                variables: { concept: 'recursion' },
-                presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking'
-            });
-            expect(result.messages).toEqual([
-                { role: 'system', content: 'You are a thoughtful assistant.' },
-                { role: 'user', content: 'Help me understand recursion' }
-            ]);
-            expect(result.settings).toEqual({
-                temperature: 0.7,
-                maxTokens: 2000
-            });
-            expect(result.modelContext).not.toBeNull();
-            expect(result.modelContext?.thinking_enabled).toBe(true);
-        });
-        it('should validate complex nested settings', async () => {
-            const result = await service.createMessages({
-                template: `<META>
-{
-"settings": {
-"reasoning": {
-"enabled": true,
-"effort": "high",
-"maxTokens": 5000
-},
-"stopSequences": ["\\n\\n", "END"],
-"frequencyPenalty": 0.5
-}
-}
-</META>
-<USER>Complex request</USER>`
-            });
-            expect(result.settings).toEqual({
-                reasoning: {
-                    enabled: true,
-                    effort: 'high',
-                    maxTokens: 5000
-                },
-                stopSequences: ['\n\n', 'END'],
-                frequencyPenalty: 0.5
-            });
-        });
-        it('should handle invalid nested settings gracefully', async () => {
-            const consoleWarnSpy = jest.spyOn(console, 'warn').mockImplementation();
-            const result = await service.createMessages({
-                template: `<META>
-{
-"settings": {
-"reasoning": {
-"enabled": "yes",
-"effort": "maximum",
-"maxTokens": -1000
-}
-}
-}
-</META>
-<USER>Test</USER>`
-            });
-            expect(result.settings).toEqual({}); // All fields were invalid, so empty object
-            expect(consoleWarnSpy).toHaveBeenCalledWith(expect.stringContaining('Invalid reasoning.enabled'));
-            expect(consoleWarnSpy).toHaveBeenCalledWith(expect.stringContaining('Invalid reasoning.effort'));
-            expect(consoleWarnSpy).toHaveBeenCalledWith(expect.stringContaining('Invalid reasoning.maxTokens'));
-            consoleWarnSpy.mockRestore();
-        });
-        it('should maintain backward compatibility for callers not using settings', async () => {
-            // Old code that destructures without settings should still work
-            const { messages, modelContext } = await service.createMessages({
-                template: `<META>{"settings": {"temperature": 0.8}}</META><USER>Test</USER>`
-            });
-            expect(messages).toEqual([
-                { role: 'user', content: 'Test' }
-            ]);
-            expect(modelContext).toBeNull();
-            // settings field exists but old code doesn't need to know about it
-        });
-    });
-});
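
The deleted test above still exercises the pre-0.4.1 thinkingExtraction key. Under the rename recorded in the parser hunks, the equivalent META block would presumably look like the following sketch, assuming createMessages itself is otherwise unchanged:

import { LLMService } from 'genai-lite'; // assumed root re-export

async function demoRenamedSetting() {
    const service = new LLMService(async () => 'test-api-key');
    const result = await service.createMessages({
        template: `<META>
{
  "settings": {
    "temperature": 0.9,
    "thinkingTagFallback": { "enabled": true, "tagName": "reasoning" }
  }
}
</META>
<USER>Write a story about {{topic}}</USER>`,
        variables: { topic: 'a robot discovering music' }
    });
    // Expectation under the rename: result.settings carries thinkingTagFallback
    // rather than thinkingExtraction.
    return result.settings;
}
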
package/dist/llm/LLMService.original.d.ts DELETED
@@ -1,147 +0,0 @@
-import type { ApiKeyProvider } from '../types';
-import type { LLMChatRequest, LLMChatRequestWithPreset, LLMResponse, LLMFailureResponse, ProviderInfo, ModelInfo, ApiProviderId, PrepareMessageOptions, PrepareMessageResult } from "./types";
-import type { ILLMClientAdapter } from "./clients/types";
-import type { ModelPreset } from "../types/presets";
-/**
- * Defines how custom presets interact with the default presets.
- * 'replace': Use only the custom presets provided. The default set is ignored.
- * 'extend': Use the default presets, and add/override them with the custom presets. This is the default behavior.
- */
-export type PresetMode = 'replace' | 'extend';
-/**
- * Options for configuring the LLMService
- */
-export interface LLMServiceOptions {
-    /** An array of custom presets to integrate. */
-    presets?: ModelPreset[];
-    /** The strategy for integrating custom presets. Defaults to 'extend'. */
-    presetMode?: PresetMode;
-}
-/**
- * Main process service for LLM operations
- *
- * This service:
- * - Manages LLM provider client adapters
- * - Integrates with ApiKeyServiceMain for secure API key access
- * - Validates requests and applies default settings
- * - Routes requests to appropriate provider adapters
- * - Handles errors and provides standardized responses
- * - Provides configurable model presets for common use cases
- */
-export declare class LLMService {
-    private getApiKey;
-    private clientAdapters;
-    private mockClientAdapter;
-    private presets;
-    constructor(getApiKey: ApiKeyProvider, options?: LLMServiceOptions);
-    /**
-     * Gets list of supported LLM providers
-     *
-     * @returns Promise resolving to array of provider information
-     */
-    getProviders(): Promise<ProviderInfo[]>;
-    /**
-     * Gets list of supported models for a specific provider
-     *
-     * @param providerId - The provider ID to get models for
-     * @returns Promise resolving to array of model information
-     */
-    getModels(providerId: ApiProviderId): Promise<ModelInfo[]>;
-    /**
-     * Sends a chat message to an LLM provider
-     *
-     * @param request - The LLM chat request
-     * @returns Promise resolving to either success or failure response
-     */
-    sendMessage(request: LLMChatRequest | LLMChatRequestWithPreset): Promise<LLMResponse | LLMFailureResponse>;
-    /**
-     * Validates basic LLM request structure
-     *
-     * @param request - The request to validate
-     * @returns LLMFailureResponse if validation fails, null if valid
-     */
-    private validateRequestStructure;
-    /**
-     * Validates reasoning settings against model capabilities
-     *
-     * @param modelInfo - The model information
-     * @param reasoning - The reasoning settings to validate
-     * @param request - The original request for error context
-     * @returns LLMFailureResponse if validation fails, null if valid
-     */
-    private validateReasoningSettings;
-    /**
-     * Merges request settings with model-specific and global defaults
-     *
-     * @param modelId - The model ID to get defaults for
-     * @param providerId - The provider ID to get defaults for
-     * @param requestSettings - Settings from the request
-     * @returns Complete settings object with all required fields
-     */
-    private mergeSettingsForModel;
-    /**
-     * Gets the appropriate client adapter for a provider
-     *
-     * @param providerId - The provider ID
-     * @returns The client adapter to use
-     */
-    private getClientAdapter;
-    /**
-     * Registers a client adapter for a specific provider
-     *
-     * @param providerId - The provider ID
-     * @param adapter - The client adapter implementation
-     */
-    registerClientAdapter(providerId: ApiProviderId, adapter: ILLMClientAdapter): void;
-    /**
-     * Gets information about registered adapters
-     *
-     * @returns Map of provider IDs to adapter info
-     */
-    getRegisteredAdapters(): Map<ApiProviderId, any>;
-    /**
-     * Gets a summary of available providers and their adapter status
-     *
-     * @returns Summary of provider availability
-     */
-    getProviderSummary(): {
-        totalProviders: number;
-        providersWithAdapters: number;
-        availableProviders: string[];
-        unavailableProviders: string[];
-    };
-    /**
-     * Gets all configured model presets
-     *
-     * @returns Array of model presets
-     */
-    getPresets(): ModelPreset[];
-    /**
-     * Resolves model information from either a preset ID or provider/model IDs
-     *
-     * @private
-     * @param options Options containing either presetId or providerId/modelId
-     * @returns Resolved model info and settings or error response
-     */
-    private resolveModelInfo;
-    /**
-     * Prepares messages with model context for template rendering
-     *
-     * This method resolves model information from either a preset or direct provider/model IDs,
-     * then renders a template with model context variables injected, or returns pre-built messages
-     * with the model context separately.
-     *
-     * @param options Options for preparing messages
-     * @returns Promise resolving to prepared messages and model context
-     *
-     * @example
-     * ```typescript
-     * const { messages } = await llm.prepareMessage({
-     *   template: 'Help me {{ thinking_enabled ? "think through" : "solve" }} this: {{ problem }}',
-     *   variables: { problem: 'complex algorithm' },
-     *   presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking'
-     * });
-     * ```
-     */
-    prepareMessage(options: PrepareMessageOptions): Promise<PrepareMessageResult | LLMFailureResponse>;
-}
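
Although this declaration file is removed from the published package, the declarations above still describe the service's public surface. A minimal usage sketch consistent with those signatures follows; the root export and the sendMessage request fields beyond messages are assumptions inferred from the deleted tests, not confirmed by this diff:

import { LLMService } from 'genai-lite'; // assumed root re-export

async function main() {
    // Constructor per the declaration above: an async ApiKeyProvider plus options.
    const service = new LLMService(async () => process.env.OPENAI_API_KEY ?? '', {
        presetMode: 'extend', // default mode: built-in presets extended by custom ones
    });

    // Request shape inferred from the deleted createMessages tests.
    const response = await service.sendMessage({
        providerId: 'openai',
        modelId: 'gpt-4.1',
        messages: [{ role: 'user', content: 'Hello' }],
    });
    console.log(response);
}
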