genai-lite 0.2.1 → 0.3.0

Files changed (49)
  1. package/README.md +382 -49
  2. package/dist/index.d.ts +3 -3
  3. package/dist/index.js +4 -3
  4. package/dist/llm/LLMService.createMessages.test.d.ts +4 -0
  5. package/dist/llm/LLMService.createMessages.test.js +364 -0
  6. package/dist/llm/LLMService.d.ts +48 -83
  7. package/dist/llm/LLMService.js +172 -480
  8. package/dist/llm/LLMService.original.d.ts +147 -0
  9. package/dist/llm/LLMService.original.js +656 -0
  10. package/dist/llm/LLMService.test.js +192 -0
  11. package/dist/llm/clients/AnthropicClientAdapter.test.js +4 -0
  12. package/dist/llm/clients/GeminiClientAdapter.test.js +4 -0
  13. package/dist/llm/clients/MockClientAdapter.js +9 -3
  14. package/dist/llm/clients/MockClientAdapter.test.js +4 -0
  15. package/dist/llm/clients/OpenAIClientAdapter.test.js +4 -0
  16. package/dist/llm/config.js +5 -0
  17. package/dist/llm/services/AdapterRegistry.d.ts +59 -0
  18. package/dist/llm/services/AdapterRegistry.js +113 -0
  19. package/dist/llm/services/AdapterRegistry.test.d.ts +1 -0
  20. package/dist/llm/services/AdapterRegistry.test.js +239 -0
  21. package/dist/llm/services/ModelResolver.d.ts +35 -0
  22. package/dist/llm/services/ModelResolver.js +116 -0
  23. package/dist/llm/services/ModelResolver.test.d.ts +1 -0
  24. package/dist/llm/services/ModelResolver.test.js +158 -0
  25. package/dist/llm/services/PresetManager.d.ts +27 -0
  26. package/dist/llm/services/PresetManager.js +50 -0
  27. package/dist/llm/services/PresetManager.test.d.ts +1 -0
  28. package/dist/llm/services/PresetManager.test.js +210 -0
  29. package/dist/llm/services/RequestValidator.d.ts +31 -0
  30. package/dist/llm/services/RequestValidator.js +122 -0
  31. package/dist/llm/services/RequestValidator.test.d.ts +1 -0
  32. package/dist/llm/services/RequestValidator.test.js +159 -0
  33. package/dist/llm/services/SettingsManager.d.ts +32 -0
  34. package/dist/llm/services/SettingsManager.js +223 -0
  35. package/dist/llm/services/SettingsManager.test.d.ts +1 -0
  36. package/dist/llm/services/SettingsManager.test.js +266 -0
  37. package/dist/llm/types.d.ts +29 -28
  38. package/dist/prompting/builder.d.ts +4 -0
  39. package/dist/prompting/builder.js +12 -61
  40. package/dist/prompting/content.js +3 -9
  41. package/dist/prompting/index.d.ts +2 -3
  42. package/dist/prompting/index.js +4 -5
  43. package/dist/prompting/parser.d.ts +80 -0
  44. package/dist/prompting/parser.js +133 -0
  45. package/dist/prompting/parser.test.js +348 -0
  46. package/dist/prompting/template.d.ts +8 -0
  47. package/dist/prompting/template.js +89 -6
  48. package/dist/prompting/template.test.js +116 -0
  49. package/package.json +1 -1
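
The headline change in 0.3.0 is a unified `createMessages` API on `LLMService`, backed by a decomposition of the service into focused collaborators (`AdapterRegistry`, `ModelResolver`, `PresetManager`, `RequestValidator`, `SettingsManager`). A minimal usage sketch, pieced together from the test file and `.d.ts` hunks below; the zero-argument key provider mirrors the tests' mock, the root import assumes the package's `dist/index` export, and the environment-variable lookup is illustrative rather than a prescribed pattern:

```typescript
import { LLMService } from 'genai-lite';

// ApiKeyProvider as exercised by the tests: an async function returning a key.
const llm = new LLMService(async () => process.env.OPENAI_API_KEY ?? '');

// createMessages renders the template with model-context variables,
// parses the role tags, and extracts any <META> settings.
const { messages, modelContext, settings } = await llm.createMessages({
  template: `
    <SYSTEM>You are a {{ thinking_enabled ? "thoughtful" : "helpful" }} assistant.</SYSTEM>
    <USER>Help me with {{ task }}</USER>
  `,
  variables: { task: 'understanding async/await' },
  presetId: 'openai-gpt-4.1-default'
});
```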
package/dist/llm/LLMService.createMessages.test.js
@@ -0,0 +1,364 @@
+ "use strict";
+ /**
+  * Tests for LLMService.createMessages method
+  */
+ Object.defineProperty(exports, "__esModule", { value: true });
+ const LLMService_1 = require("./LLMService");
+ // Create a mock API key provider
+ const mockApiKeyProvider = async () => 'test-api-key';
+ describe('LLMService.createMessages', () => {
+     let service;
+     beforeEach(() => {
+         service = new LLMService_1.LLMService(mockApiKeyProvider);
+     });
+     describe('Basic template parsing', () => {
+         it('should parse simple template without model context', async () => {
+             const result = await service.createMessages({
+                 template: 'Hello, how can I help you?'
+             });
+             expect(result.messages).toEqual([
+                 { role: 'user', content: 'Hello, how can I help you?' }
+             ]);
+             expect(result.modelContext).toBeNull();
+         });
+         it('should parse multi-turn template without model context', async () => {
+             const result = await service.createMessages({
+                 template: `
+ <SYSTEM>You are a helpful assistant.</SYSTEM>
+ <USER>Hello</USER>
+ <ASSISTANT>Hi! How can I help you today?</ASSISTANT>
+ <USER>Can you explain {{topic}}?</USER>
+ `,
+                 variables: { topic: 'promises in JavaScript' }
+             });
+             expect(result.messages).toEqual([
+                 { role: 'system', content: 'You are a helpful assistant.' },
+                 { role: 'user', content: 'Hello' },
+                 { role: 'assistant', content: 'Hi! How can I help you today?' },
+                 { role: 'user', content: 'Can you explain promises in JavaScript?' }
+             ]);
+             expect(result.modelContext).toBeNull();
+         });
+     });
+     describe('Model-aware templates', () => {
+         it('should inject model context for valid preset', async () => {
+             const result = await service.createMessages({
+                 template: `
+ <SYSTEM>You are a {{ thinking_enabled ? "thoughtful" : "standard" }} assistant.</SYSTEM>
+ <USER>Help me understand {{concept}}</USER>
+ `,
+                 variables: { concept: 'recursion' },
+                 presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking'
+             });
+             expect(result.modelContext).not.toBeNull();
+             expect(result.modelContext?.thinking_enabled).toBe(true);
+             expect(result.modelContext?.thinking_available).toBe(true);
+             expect(result.modelContext?.model_id).toBe('claude-3-7-sonnet-20250219');
+             expect(result.modelContext?.provider_id).toBe('anthropic');
+             expect(result.messages).toEqual([
+                 { role: 'system', content: 'You are a thoughtful assistant.' },
+                 { role: 'user', content: 'Help me understand recursion' }
+             ]);
+         });
+         it('should inject model context for valid provider/model combo', async () => {
+             const result = await service.createMessages({
+                 template: 'Model: {{model_id}}, Provider: {{provider_id}}',
+                 providerId: 'openai',
+                 modelId: 'gpt-4.1'
+             });
+             expect(result.modelContext).not.toBeNull();
+             expect(result.modelContext?.model_id).toBe('gpt-4.1');
+             expect(result.modelContext?.provider_id).toBe('openai');
+             expect(result.messages).toEqual([
+                 { role: 'user', content: 'Model: gpt-4.1, Provider: openai' }
+             ]);
+         });
+         it('should handle model without reasoning support', async () => {
+             const result = await service.createMessages({
+                 template: 'Thinking available: {{thinking_available}}, enabled: {{thinking_enabled}}',
+                 presetId: 'openai-gpt-4.1-default'
+             });
+             expect(result.modelContext?.thinking_available).toBe(false);
+             expect(result.modelContext?.thinking_enabled).toBe(false);
+         });
+     });
+     describe('Complex scenarios', () => {
+         it('should handle variables that inject role tags', async () => {
+             const result = await service.createMessages({
+                 template: `
+ <SYSTEM>Base system prompt</SYSTEM>
+ {{extraMessages}}
+ <USER>Final question</USER>
+ `,
+                 variables: {
+                     extraMessages: '<USER>First question</USER>\n<ASSISTANT>First answer</ASSISTANT>'
+                 }
+             });
+             expect(result.messages).toEqual([
+                 { role: 'system', content: 'Base system prompt' },
+                 { role: 'user', content: 'First question' },
+                 { role: 'assistant', content: 'First answer' },
+                 { role: 'user', content: 'Final question' }
+             ]);
+         });
+         it('should handle conditional role injection based on model context', async () => {
+             const result = await service.createMessages({
+                 template: `
+ {{ thinking_enabled ? '<SYSTEM>Think step-by-step before answering.</SYSTEM>' : '' }}
+ <USER>Solve: {{problem}}</USER>
+ `,
+                 variables: { problem: 'What is 15% of 240?' },
+                 presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking'
+             });
+             expect(result.messages).toEqual([
+                 { role: 'system', content: 'Think step-by-step before answering.' },
+                 { role: 'user', content: 'Solve: What is 15% of 240?' }
+             ]);
+         });
+         it('should handle nested conditionals with model context', async () => {
+             const result = await service.createMessages({
+                 template: `
+ <SYSTEM>
+ You are using {{ model_id || "no model" }}.
+ {{ thinking_available ? 'You have reasoning capabilities.' : 'Standard model.' }}
+ </SYSTEM>
+ <USER>Hello</USER>
+ `,
+                 providerId: 'anthropic',
+                 modelId: 'claude-3-5-haiku-20241022'
+             });
+             expect(result.messages[0].role).toBe('system');
+             // Check that we have model context
+             expect(result.modelContext).not.toBeNull();
+             if (result.modelContext) {
+                 expect(result.modelContext.model_id).toBe('claude-3-5-haiku-20241022');
+                 expect(result.modelContext.thinking_available).toBe(false);
+             }
+         });
+     });
+     describe('Error handling', () => {
+         it('should proceed without model context on invalid preset', async () => {
+             const result = await service.createMessages({
+                 template: 'Has model context: {{model_id ? "yes" : "no"}}',
+                 presetId: 'invalid-preset-id'
+             });
+             expect(result.modelContext).toBeNull();
+             expect(result.messages).toEqual([
+                 { role: 'user', content: 'Has model context: no' }
+             ]);
+         });
+         it('should handle invalid template syntax gracefully', async () => {
+             const result = await service.createMessages({
+                 template: 'Unclosed conditional: {{ if true',
+                 variables: {}
+             });
+             // The template engine doesn't throw on invalid syntax; it renders the text as-is
+             expect(result.messages).toEqual([
+                 { role: 'user', content: 'Unclosed conditional: {{ if true' }
+             ]);
+         });
+         it('should handle empty template', async () => {
+             const result = await service.createMessages({
+                 template: ''
+             });
+             expect(result.messages).toEqual([]);
+             expect(result.modelContext).toBeNull();
+         });
+         it('should handle whitespace-only template', async () => {
+             const result = await service.createMessages({
+                 template: ' \n\t '
+             });
+             expect(result.messages).toEqual([]);
+             expect(result.modelContext).toBeNull();
+         });
+     });
+     describe('Integration with reasoning settings', () => {
+         it('should handle reasoning settings in model context', async () => {
+             const result = await service.createMessages({
+                 template: `
+ Thinking enabled: {{thinking_enabled}}
+ Thinking available: {{thinking_available}}
+ Model: {{model_id}}
+ `,
+                 presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking'
+             });
+             expect(result.modelContext?.thinking_enabled).toBe(true);
+             expect(result.modelContext?.thinking_available).toBe(true);
+             expect(result.messages[0].content).toContain('Thinking enabled: true');
+             expect(result.messages[0].content).toContain('Thinking available: true');
+         });
+         it('should handle models with always-on reasoning', async () => {
+             const result = await service.createMessages({
+                 template: 'Provider: {{provider_id}}, Model: {{model_id}}',
+                 providerId: 'gemini',
+                 modelId: 'gemini-2.5-pro'
+             });
+             expect(result.modelContext).not.toBeNull();
+             expect(result.modelContext?.provider_id).toBe('gemini');
+             expect(result.modelContext?.model_id).toBe('gemini-2.5-pro');
+         });
+     });
+     describe('Variable precedence', () => {
+         it('should allow user variables to override model context', async () => {
+             const result = await service.createMessages({
+                 template: 'Model: {{model_id}}',
+                 variables: { model_id: 'user-override' },
+                 presetId: 'openai-gpt-4.1-default'
+             });
+             expect(result.messages).toEqual([
+                 { role: 'user', content: 'Model: user-override' }
+             ]);
+         });
+         it('should merge variables correctly', async () => {
+             const result = await service.createMessages({
+                 template: 'Model: {{model_id}}, Task: {{task}}, Thinking: {{thinking_enabled}}',
+                 variables: { task: 'code review' },
+                 presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking'
+             });
+             expect(result.messages[0].content).toBe('Model: claude-3-7-sonnet-20250219, Task: code review, Thinking: true');
+         });
+     });
+     describe('Template metadata parsing', () => {
+         it('should extract settings from META block', async () => {
+             const result = await service.createMessages({
+                 template: `<META>
+ {
+ "settings": {
+ "temperature": 0.9,
+ "thinkingExtraction": { "enabled": true, "tag": "reasoning" }
+ }
+ }
+ </META>
+ <SYSTEM>You are a creative writer.</SYSTEM>
+ <USER>Write a story about {{topic}}</USER>`,
+                 variables: { topic: 'a robot discovering music' }
+             });
+             expect(result.messages).toEqual([
+                 { role: 'system', content: 'You are a creative writer.' },
+                 { role: 'user', content: 'Write a story about a robot discovering music' }
+             ]);
+             expect(result.settings).toEqual({
+                 temperature: 0.9,
+                 thinkingExtraction: { enabled: true, tag: 'reasoning' }
+             });
+         });
+         it('should return empty settings when no META block exists', async () => {
+             const result = await service.createMessages({
+                 template: '<USER>Simple message</USER>'
+             });
+             expect(result.messages).toEqual([
+                 { role: 'user', content: 'Simple message' }
+             ]);
+             expect(result.settings).toEqual({});
+         });
+         it('should handle invalid settings in META block with warnings', async () => {
+             const consoleWarnSpy = jest.spyOn(console, 'warn').mockImplementation();
+             const result = await service.createMessages({
+                 template: `<META>
+ {
+ "settings": {
+ "temperature": 3.0,
+ "unknownSetting": "value",
+ "maxTokens": -50
+ }
+ }
+ </META>
+ <USER>Test</USER>`
+             });
+             expect(result.messages).toEqual([
+                 { role: 'user', content: 'Test' }
+             ]);
+             expect(result.settings).toEqual({}); // All settings were invalid
+             expect(consoleWarnSpy).toHaveBeenCalledWith(expect.stringContaining('Invalid temperature value'));
+             expect(consoleWarnSpy).toHaveBeenCalledWith(expect.stringContaining('Unknown setting "unknownSetting"'));
+             expect(consoleWarnSpy).toHaveBeenCalledWith(expect.stringContaining('Invalid maxTokens value'));
+             consoleWarnSpy.mockRestore();
+         });
+         it('should work with model context and META settings', async () => {
+             const result = await service.createMessages({
+                 template: `<META>
+ {
+ "settings": {
+ "temperature": 0.7,
+ "maxTokens": 2000
+ }
+ }
+ </META>
+ <SYSTEM>You are a {{ thinking_enabled ? "thoughtful" : "quick" }} assistant.</SYSTEM>
+ <USER>Help me understand {{concept}}</USER>`,
+                 variables: { concept: 'recursion' },
+                 presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking'
+             });
+             expect(result.messages).toEqual([
+                 { role: 'system', content: 'You are a thoughtful assistant.' },
+                 { role: 'user', content: 'Help me understand recursion' }
+             ]);
+             expect(result.settings).toEqual({
+                 temperature: 0.7,
+                 maxTokens: 2000
+             });
+             expect(result.modelContext).not.toBeNull();
+             expect(result.modelContext?.thinking_enabled).toBe(true);
+         });
+         it('should validate complex nested settings', async () => {
+             const result = await service.createMessages({
+                 template: `<META>
+ {
+ "settings": {
+ "reasoning": {
+ "enabled": true,
+ "effort": "high",
+ "maxTokens": 5000
+ },
+ "stopSequences": ["\\n\\n", "END"],
+ "frequencyPenalty": 0.5
+ }
+ }
+ </META>
+ <USER>Complex request</USER>`
+             });
+             expect(result.settings).toEqual({
+                 reasoning: {
+                     enabled: true,
+                     effort: 'high',
+                     maxTokens: 5000
+                 },
+                 stopSequences: ['\n\n', 'END'],
+                 frequencyPenalty: 0.5
+             });
+         });
+         it('should handle invalid nested settings gracefully', async () => {
+             const consoleWarnSpy = jest.spyOn(console, 'warn').mockImplementation();
+             const result = await service.createMessages({
+                 template: `<META>
+ {
+ "settings": {
+ "reasoning": {
+ "enabled": "yes",
+ "effort": "maximum",
+ "maxTokens": -1000
+ }
+ }
+ }
+ </META>
+ <USER>Test</USER>`
+             });
+             expect(result.settings).toEqual({}); // All fields were invalid, so empty object
+             expect(consoleWarnSpy).toHaveBeenCalledWith(expect.stringContaining('Invalid reasoning.enabled'));
+             expect(consoleWarnSpy).toHaveBeenCalledWith(expect.stringContaining('Invalid reasoning.effort'));
+             expect(consoleWarnSpy).toHaveBeenCalledWith(expect.stringContaining('Invalid reasoning.maxTokens'));
+             consoleWarnSpy.mockRestore();
+         });
+         it('should maintain backward compatibility for callers not using settings', async () => {
+             // Old code that destructures without settings should still work
+             const { messages, modelContext } = await service.createMessages({
+                 template: `<META>{"settings": {"temperature": 0.8}}</META><USER>Test</USER>`
+             });
+             expect(messages).toEqual([
+                 { role: 'user', content: 'Test' }
+             ]);
+             expect(modelContext).toBeNull();
+             // settings field exists but old code doesn't need to know about it
+         });
+     });
+ });
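
Taken together, these tests pin down the `<META>` contract: valid settings come back on `result.settings`, invalid or unknown keys are dropped with a `console.warn`, and existing callers that destructure only `messages` and `modelContext` keep working. A hedged sketch of consuming the extracted settings; forwarding them to `sendMessage` is an assumption on my part, since the tests only guarantee that `createMessages` returns them:

```typescript
const { messages, settings } = await llm.createMessages({
  template: `<META>{ "settings": { "temperature": 0.9 } }</META>
<SYSTEM>You are a creative writer.</SYSTEM>
<USER>Write a story about {{topic}}</USER>`,
  variables: { topic: 'a robot discovering music' }
});

// Assumed wiring: pass the template-declared settings along with the parsed
// messages. The request shape (providerId/modelId/messages/settings) follows
// LLMChatRequest as used elsewhere in the package, not this diff.
const response = await llm.sendMessage({
  providerId: 'openai',
  modelId: 'gpt-4.1',
  messages,
  settings
});
```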
package/dist/llm/LLMService.d.ts
@@ -1,13 +1,8 @@
  import type { ApiKeyProvider } from '../types';
- import type { LLMChatRequest, LLMChatRequestWithPreset, LLMResponse, LLMFailureResponse, ProviderInfo, ModelInfo, ApiProviderId, PrepareMessageOptions, PrepareMessageResult } from "./types";
- import type { ILLMClientAdapter } from "./clients/types";
+ import type { LLMChatRequest, LLMChatRequestWithPreset, LLMResponse, LLMFailureResponse, ProviderInfo, ModelInfo, ApiProviderId, LLMSettings, ModelContext, LLMMessage } from "./types";
  import type { ModelPreset } from "../types/presets";
- /**
-  * Defines how custom presets interact with the default presets.
-  * 'replace': Use only the custom presets provided. The default set is ignored.
-  * 'extend': Use the default presets, and add/override them with the custom presets. This is the default behavior.
-  */
- export type PresetMode = 'replace' | 'extend';
+ import { type PresetMode } from "./services/PresetManager";
+ export type { PresetMode };
  /**
   * Options for configuring the LLMService
   */
@@ -17,6 +12,17 @@ export interface LLMServiceOptions {
      /** The strategy for integrating custom presets. Defaults to 'extend'. */
      presetMode?: PresetMode;
  }
+ /**
+  * Result from createMessages method
+  */
+ export interface CreateMessagesResult {
+     /** The parsed messages with role assignments */
+     messages: LLMMessage[];
+     /** Model context variables that were injected during template rendering */
+     modelContext: ModelContext | null;
+     /** Settings extracted from the template's <META> block */
+     settings: Partial<LLMSettings>;
+ }
  /**
   * Main process service for LLM operations
   *
@@ -30,9 +36,11 @@ export interface LLMServiceOptions {
   */
  export declare class LLMService {
      private getApiKey;
-     private clientAdapters;
-     private mockClientAdapter;
-     private presets;
+     private presetManager;
+     private adapterRegistry;
+     private requestValidator;
+     private settingsManager;
+     private modelResolver;
      constructor(getApiKey: ApiKeyProvider, options?: LLMServiceOptions);
      /**
       * Gets list of supported LLM providers
@@ -55,93 +63,50 @@ export declare class LLMService {
       */
      sendMessage(request: LLMChatRequest | LLMChatRequestWithPreset): Promise<LLMResponse | LLMFailureResponse>;
      /**
-      * Validates basic LLM request structure
-      *
-      * @param request - The request to validate
-      * @returns LLMFailureResponse if validation fails, null if valid
-      */
-     private validateRequestStructure;
-     /**
-      * Validates reasoning settings against model capabilities
+      * Gets all configured model presets
       *
-      * @param modelInfo - The model information
-      * @param reasoning - The reasoning settings to validate
-      * @param request - The original request for error context
-      * @returns LLMFailureResponse if validation fails, null if valid
+      * @returns Array of model presets
       */
-     private validateReasoningSettings;
+     getPresets(): ModelPreset[];
      /**
-      * Merges request settings with model-specific and global defaults
+      * Creates messages from a template with role tags and model-aware variable substitution
       *
-      * @param modelId - The model ID to get defaults for
-      * @param providerId - The provider ID to get defaults for
-      * @param requestSettings - Settings from the request
-      * @returns Complete settings object with all required fields
-      */
-     private mergeSettingsForModel;
-     /**
-      * Gets the appropriate client adapter for a provider
+      * This unified method combines the functionality of template rendering, model context
+      * injection, and role tag parsing into a single, intuitive API. It replaces the need
+      * to chain prepareMessage and buildMessagesFromTemplate for model-aware multi-turn prompts.
       *
-      * @param providerId - The provider ID
-      * @returns The client adapter to use
-      */
-     private getClientAdapter;
-     /**
-      * Registers a client adapter for a specific provider
+      * @param options Options for creating messages
+      * @returns Promise resolving to parsed messages and model context
       *
-      * @param providerId - The provider ID
-      * @param adapter - The client adapter implementation
+      * @example
+      * ```typescript
+      * const { messages } = await llm.createMessages({
+      *   template: `
+      *     <SYSTEM>You are a {{ thinking_enabled ? "thoughtful" : "helpful" }} assistant.</SYSTEM>
+      *     <USER>Help me with {{ task }}</USER>
+      *   `,
+      *   variables: { task: 'understanding async/await' },
+      *   presetId: 'openai-gpt-4.1-default'
+      * });
+      * ```
       */
-     registerClientAdapter(providerId: ApiProviderId, adapter: ILLMClientAdapter): void;
+     createMessages(options: {
+         template: string;
+         variables?: Record<string, any>;
+         presetId?: string;
+         providerId?: string;
+         modelId?: string;
+     }): Promise<CreateMessagesResult>;
      /**
       * Gets information about registered adapters
       *
       * @returns Map of provider IDs to adapter info
       */
-     getRegisteredAdapters(): Map<ApiProviderId, any>;
+     getRegisteredAdapters(): Map<string, import("./services/AdapterRegistry").AdapterInfo>;
      /**
       * Gets a summary of available providers and their adapter status
       *
       * @returns Summary of provider availability
       */
-     getProviderSummary(): {
-         totalProviders: number;
-         providersWithAdapters: number;
-         availableProviders: string[];
-         unavailableProviders: string[];
-     };
-     /**
-      * Gets all configured model presets
-      *
-      * @returns Array of model presets
-      */
-     getPresets(): ModelPreset[];
-     /**
-      * Resolves model information from either a preset ID or provider/model IDs
-      *
-      * @private
-      * @param options Options containing either presetId or providerId/modelId
-      * @returns Resolved model info and settings or error response
-      */
-     private resolveModelInfo;
-     /**
-      * Prepares messages with model context for template rendering
-      *
-      * This method resolves model information from either a preset or direct provider/model IDs,
-      * then renders a template with model context variables injected, or returns pre-built messages
-      * with the model context separately.
-      *
-      * @param options Options for preparing messages
-      * @returns Promise resolving to prepared messages and model context
-      *
-      * @example
-      * ```typescript
-      * const { messages } = await llm.prepareMessage({
-      *   template: 'Help me {{ thinking_enabled ? "think through" : "solve" }} this: {{ problem }}',
-      *   variables: { problem: 'complex algorithm' },
-      *   presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking'
-      * });
-      * ```
-      */
-     prepareMessage(options: PrepareMessageOptions): Promise<PrepareMessageResult | LLMFailureResponse>;
+     getProviderSummary(): import("./services/AdapterRegistry").ProviderSummary;
  }
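
For 0.2.x callers, the removed `prepareMessage` maps directly onto `createMessages`; the removed JSDoc example and its replacement make the migration nearly mechanical. The sketch below reuses both examples verbatim; `PrepareMessageResult`'s exact shape is not shown in this hunk:

```typescript
// Before (0.2.x): prepareMessage returned rendered messages plus model context.
// const result = await llm.prepareMessage({
//   template: 'Help me {{ thinking_enabled ? "think through" : "solve" }} this: {{ problem }}',
//   variables: { problem: 'complex algorithm' },
//   presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking'
// });

// After (0.3.0): the same options object goes to createMessages, which also
// parses <SYSTEM>/<USER>/<ASSISTANT> role tags and returns META settings.
const { messages, modelContext, settings } = await llm.createMessages({
  template: 'Help me {{ thinking_enabled ? "think through" : "solve" }} this: {{ problem }}',
  variables: { problem: 'complex algorithm' },
  presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking'
});
```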