genai-lite 0.4.0 → 0.4.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. package/README.md +47 -37
  2. package/dist/llm/LLMService.d.ts +29 -2
  3. package/dist/llm/LLMService.js +80 -36
  4. package/dist/llm/config.js +4 -4
  5. package/dist/llm/services/SettingsManager.js +17 -11
  6. package/dist/llm/types.d.ts +81 -22
  7. package/dist/prompting/parser.d.ts +2 -2
  8. package/dist/prompting/parser.js +2 -2
  9. package/package.json +1 -1
  10. package/dist/llm/LLMService.createMessages.test.d.ts +0 -4
  11. package/dist/llm/LLMService.createMessages.test.js +0 -364
  12. package/dist/llm/LLMService.original.d.ts +0 -147
  13. package/dist/llm/LLMService.original.js +0 -656
  14. package/dist/llm/LLMService.prepareMessage.test.d.ts +0 -1
  15. package/dist/llm/LLMService.prepareMessage.test.js +0 -303
  16. package/dist/llm/LLMService.presets.test.d.ts +0 -1
  17. package/dist/llm/LLMService.presets.test.js +0 -210
  18. package/dist/llm/LLMService.sendMessage.preset.test.d.ts +0 -1
  19. package/dist/llm/LLMService.sendMessage.preset.test.js +0 -153
  20. package/dist/llm/LLMService.test.d.ts +0 -1
  21. package/dist/llm/LLMService.test.js +0 -639
  22. package/dist/llm/clients/AnthropicClientAdapter.test.d.ts +0 -1
  23. package/dist/llm/clients/AnthropicClientAdapter.test.js +0 -273
  24. package/dist/llm/clients/GeminiClientAdapter.test.d.ts +0 -1
  25. package/dist/llm/clients/GeminiClientAdapter.test.js +0 -405
  26. package/dist/llm/clients/LlamaCppClientAdapter.test.d.ts +0 -1
  27. package/dist/llm/clients/LlamaCppClientAdapter.test.js +0 -447
  28. package/dist/llm/clients/LlamaCppServerClient.test.d.ts +0 -1
  29. package/dist/llm/clients/LlamaCppServerClient.test.js +0 -294
  30. package/dist/llm/clients/MockClientAdapter.test.d.ts +0 -1
  31. package/dist/llm/clients/MockClientAdapter.test.js +0 -250
  32. package/dist/llm/clients/OpenAIClientAdapter.test.d.ts +0 -1
  33. package/dist/llm/clients/OpenAIClientAdapter.test.js +0 -258
  34. package/dist/llm/clients/adapterErrorUtils.test.d.ts +0 -1
  35. package/dist/llm/clients/adapterErrorUtils.test.js +0 -123
  36. package/dist/llm/config.test.d.ts +0 -1
  37. package/dist/llm/config.test.js +0 -176
  38. package/dist/llm/services/AdapterRegistry.test.d.ts +0 -1
  39. package/dist/llm/services/AdapterRegistry.test.js +0 -239
  40. package/dist/llm/services/ModelResolver.test.d.ts +0 -1
  41. package/dist/llm/services/ModelResolver.test.js +0 -179
  42. package/dist/llm/services/PresetManager.test.d.ts +0 -1
  43. package/dist/llm/services/PresetManager.test.js +0 -210
  44. package/dist/llm/services/RequestValidator.test.d.ts +0 -1
  45. package/dist/llm/services/RequestValidator.test.js +0 -159
  46. package/dist/llm/services/SettingsManager.test.d.ts +0 -1
  47. package/dist/llm/services/SettingsManager.test.js +0 -266
  48. package/dist/prompting/builder.d.ts +0 -38
  49. package/dist/prompting/builder.js +0 -63
  50. package/dist/prompting/builder.test.d.ts +0 -4
  51. package/dist/prompting/builder.test.js +0 -109
  52. package/dist/prompting/content.test.d.ts +0 -4
  53. package/dist/prompting/content.test.js +0 -212
  54. package/dist/prompting/parser.test.d.ts +0 -4
  55. package/dist/prompting/parser.test.js +0 -464
  56. package/dist/prompting/template.test.d.ts +0 -1
  57. package/dist/prompting/template.test.js +0 -250
  58. package/dist/providers/fromEnvironment.test.d.ts +0 -1
  59. package/dist/providers/fromEnvironment.test.js +0 -59
@@ -43,28 +43,45 @@ export interface LLMReasoningSettings {
43
43
  exclude?: boolean;
44
44
  }
45
45
  /**
46
- * Settings for extracting 'thinking' content from the start of a response
46
+ * Settings for extracting reasoning from XML tags when native reasoning is not active.
47
+ *
48
+ * This is a fallback mechanism for getting reasoning from:
49
+ * 1. Models without native reasoning support (e.g., GPT-4, Claude 3.5)
50
+ * 2. Models with native reasoning disabled (to see the full reasoning trace)
51
+ *
52
+ * **Key use case:** Disable native reasoning on capable models to avoid obfuscation
53
+ * by providers, then prompt the model to use <thinking> tags for full visibility.
54
+ *
55
+ * **Important:** You must explicitly prompt the model to use thinking tags in your prompt.
56
+ * The library only extracts them - it doesn't generate them automatically.
47
57
  */
48
- export interface LLMThinkingExtractionSettings {
58
+ export interface LLMThinkingTagFallbackSettings {
49
59
  /**
50
- * If true, enables the automatic extraction of content from a specified XML tag.
51
- * @default false
60
+ * Enable tag extraction fallback.
61
+ * When this object exists, extraction is enabled by default (enabled: true).
62
+ * Set to false to explicitly disable (useful for overriding inherited settings).
63
+ * @default true (when thinkingTagFallback object exists)
52
64
  */
53
65
  enabled?: boolean;
54
66
  /**
55
- * The XML tag name to look for (e.g., 'thinking', 'reasoning', 'scratchpad').
67
+ * Name of the XML tag to extract.
56
68
  * @default 'thinking'
69
+ * @example tagName: 'scratchpad' will extract <scratchpad>...</scratchpad>
57
70
  */
58
- tag?: string;
71
+ tagName?: string;
59
72
  /**
60
- * Defines behavior when the tag is not found. 'auto' is the recommended default.
61
- * - 'ignore': Silently continue without a warning or error.
62
- * - 'warn': Log a console warning but return the response as-is.
63
- * - 'error': Return an LLMFailureResponse, treating it as a failed request.
64
- * - 'auto': Becomes 'error' unless the model has active native reasoning. If native reasoning is active, this becomes 'ignore'.
65
- * @default 'auto'
73
+ * Enforce that thinking tags are present when native reasoning is not active.
74
+ *
75
+ * When true:
76
+ * - If native reasoning is active: No enforcement (model using native)
77
+ * - If native reasoning is NOT active: Error if tags missing (fallback required)
78
+ *
79
+ * This is always "smart" - it automatically detects whether native reasoning
80
+ * is active and only enforces when the model needs to use tags as a fallback.
81
+ *
82
+ * @default false
66
83
  */
67
- onMissing?: 'ignore' | 'warn' | 'error' | 'auto';
84
+ enforce?: boolean;
68
85
  }
69
86
  /**
70
87
  * Configurable settings for LLM requests
@@ -91,10 +108,19 @@ export interface LLMSettings {
91
108
  /** Universal reasoning/thinking configuration */
92
109
  reasoning?: LLMReasoningSettings;
93
110
  /**
94
- * Configuration for automatically extracting 'thinking' blocks from responses.
95
- * Enabled by default.
111
+ * Extract reasoning from XML tags when native reasoning is not active.
112
+ *
113
+ * This is a fallback mechanism for getting reasoning from:
114
+ * 1. Models without native reasoning support (e.g., GPT-4, Claude 3.5)
115
+ * 2. Models with native reasoning disabled (to see the full reasoning trace)
116
+ *
117
+ * Key use case: Disable native reasoning on capable models to avoid obfuscation
118
+ * by providers, then prompt the model to use <thinking> tags for full visibility.
119
+ *
120
+ * Note: You must explicitly prompt the model to use thinking tags in your prompt.
121
+ * The library only extracts them - it doesn't generate them automatically.
96
122
  */
97
- thinkingExtraction?: LLMThinkingExtractionSettings;
123
+ thinkingTagFallback?: LLMThinkingTagFallbackSettings;
98
124
  }
99
125
  /**
100
126
  * Request structure for chat completion
@@ -252,18 +278,51 @@ export declare const LLM_IPC_CHANNELS: {
252
278
  */
253
279
  export type LLMIPCChannelName = (typeof LLM_IPC_CHANNELS)[keyof typeof LLM_IPC_CHANNELS];
254
280
  /**
255
- * Model context variables injected into templates
281
+ * Model context variables injected into templates during createMessages()
282
+ *
283
+ * These variables enable templates to adapt based on the model's reasoning capabilities.
284
+ *
285
+ * **Key Usage Pattern:**
286
+ * When adding thinking tag instructions, use requires_tags_for_thinking:
287
+ * ```
288
+ * {{ requires_tags_for_thinking ? 'Write your reasoning in <thinking> tags first.' : '' }}
289
+ * ```
290
+ *
291
+ * This ensures:
292
+ * - Models with active native reasoning get clean prompts
293
+ * - Models without native reasoning get explicit tag instructions
256
294
  */
257
295
  export interface ModelContext {
258
- /** Whether reasoning/thinking is enabled for this request */
259
- thinking_enabled: boolean;
260
- /** Whether the model supports reasoning/thinking */
261
- thinking_available: boolean;
296
+ /**
297
+ * Whether native reasoning is CURRENTLY ACTIVE for this request.
298
+ * - true: Model is using built-in reasoning (Claude 4, o4-mini, Gemini with reasoning enabled)
299
+ * - false: No native reasoning is active (model doesn't support it OR it's been disabled)
300
+ *
301
+ * Use in templates when adapting behavior based on whether native reasoning is happening.
302
+ */
303
+ native_reasoning_active: boolean;
304
+ /**
305
+ * Whether the model HAS THE CAPABILITY to use native reasoning.
306
+ * - true: Model supports native reasoning (may or may not be enabled)
307
+ * - false: Model does not support native reasoning
308
+ *
309
+ * Use in templates to check if native reasoning is possible (not necessarily active).
310
+ */
311
+ native_reasoning_capable: boolean;
312
+ /**
313
+ * Whether this model/request requires thinking tags to produce reasoning.
314
+ * - true: Native reasoning is not active, model needs prompting to use <thinking> tags
315
+ * - false: Native reasoning is active, no need for thinking tags
316
+ *
317
+ * Use in templates for conditional thinking tag instructions:
318
+ * {{ requires_tags_for_thinking ? 'Write your reasoning in <thinking> tags first.' : '' }}
319
+ */
320
+ requires_tags_for_thinking: boolean;
262
321
  /** The resolved model ID */
263
322
  model_id: string;
264
323
  /** The resolved provider ID */
265
324
  provider_id: string;
266
- /** Reasoning effort level if specified */
325
+ /** Reasoning effort level if specified ('low', 'medium', or 'high') */
267
326
  reasoning_effort?: string;
268
327
  /** Reasoning max tokens if specified */
269
328
  reasoning_max_tokens?: number;
@@ -103,7 +103,7 @@ export declare function parseRoleTags(template: string): Array<{
103
103
  * {
104
104
  * "settings": {
105
105
  * "temperature": 0.9,
106
- * "thinkingExtraction": { "enabled": true, "tag": "reasoning" }
106
+ * "thinkingTagFallback": { "enabled": true, "tagName": "reasoning" }
107
107
  * }
108
108
  * }
109
109
  * </META>
@@ -112,7 +112,7 @@ export declare function parseRoleTags(template: string): Array<{
112
112
  * `;
113
113
  *
114
114
  * const { metadata, content } = parseTemplateWithMetadata(template);
115
- * // metadata.settings will contain the temperature and thinkingExtraction settings
115
+ * // metadata.settings will contain the temperature and thinkingTagFallback settings
116
116
  * // content will contain the SYSTEM and USER tags
117
117
  */
118
118
  export declare function parseTemplateWithMetadata(template: string): {
@@ -146,7 +146,7 @@ function parseRoleTags(template) {
146
146
  * {
147
147
  * "settings": {
148
148
  * "temperature": 0.9,
149
- * "thinkingExtraction": { "enabled": true, "tag": "reasoning" }
149
+ * "thinkingTagFallback": { "enabled": true, "tagName": "reasoning" }
150
150
  * }
151
151
  * }
152
152
  * </META>
@@ -155,7 +155,7 @@ function parseRoleTags(template) {
155
155
  * `;
156
156
  *
157
157
  * const { metadata, content } = parseTemplateWithMetadata(template);
158
- * // metadata.settings will contain the temperature and thinkingExtraction settings
158
+ * // metadata.settings will contain the temperature and thinkingTagFallback settings
159
159
  * // content will contain the SYSTEM and USER tags
160
160
  */
161
161
  function parseTemplateWithMetadata(template) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "genai-lite",
3
- "version": "0.4.0",
3
+ "version": "0.4.2",
4
4
  "description": "A lightweight, portable toolkit for interacting with various Generative AI APIs.",
5
5
  "main": "dist/index.js",
6
6
  "types": "dist/index.d.ts",
@@ -1,4 +0,0 @@
1
- /**
2
- * Tests for LLMService.createMessages method
3
- */
4
- export {};
@@ -1,364 +0,0 @@
1
- "use strict";
2
- /**
3
- * Tests for LLMService.createMessages method
4
- */
5
- Object.defineProperty(exports, "__esModule", { value: true });
6
- const LLMService_1 = require("./LLMService");
7
- // Create a mock API key provider
8
- const mockApiKeyProvider = async () => 'test-api-key';
9
- describe('LLMService.createMessages', () => {
10
- let service;
11
- beforeEach(() => {
12
- service = new LLMService_1.LLMService(mockApiKeyProvider);
13
- });
14
- describe('Basic template parsing', () => {
15
- it('should parse simple template without model context', async () => {
16
- const result = await service.createMessages({
17
- template: 'Hello, how can I help you?'
18
- });
19
- expect(result.messages).toEqual([
20
- { role: 'user', content: 'Hello, how can I help you?' }
21
- ]);
22
- expect(result.modelContext).toBeNull();
23
- });
24
- it('should parse multi-turn template without model context', async () => {
25
- const result = await service.createMessages({
26
- template: `
27
- <SYSTEM>You are a helpful assistant.</SYSTEM>
28
- <USER>Hello</USER>
29
- <ASSISTANT>Hi! How can I help you today?</ASSISTANT>
30
- <USER>Can you explain {{topic}}?</USER>
31
- `,
32
- variables: { topic: 'promises in JavaScript' }
33
- });
34
- expect(result.messages).toEqual([
35
- { role: 'system', content: 'You are a helpful assistant.' },
36
- { role: 'user', content: 'Hello' },
37
- { role: 'assistant', content: 'Hi! How can I help you today?' },
38
- { role: 'user', content: 'Can you explain promises in JavaScript?' }
39
- ]);
40
- expect(result.modelContext).toBeNull();
41
- });
42
- });
43
- describe('Model-aware templates', () => {
44
- it('should inject model context for valid preset', async () => {
45
- const result = await service.createMessages({
46
- template: `
47
- <SYSTEM>You are a {{ thinking_enabled ? "thoughtful" : "standard" }} assistant.</SYSTEM>
48
- <USER>Help me understand {{concept}}</USER>
49
- `,
50
- variables: { concept: 'recursion' },
51
- presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking'
52
- });
53
- expect(result.modelContext).not.toBeNull();
54
- expect(result.modelContext?.thinking_enabled).toBe(true);
55
- expect(result.modelContext?.thinking_available).toBe(true);
56
- expect(result.modelContext?.model_id).toBe('claude-3-7-sonnet-20250219');
57
- expect(result.modelContext?.provider_id).toBe('anthropic');
58
- expect(result.messages).toEqual([
59
- { role: 'system', content: 'You are a thoughtful assistant.' },
60
- { role: 'user', content: 'Help me understand recursion' }
61
- ]);
62
- });
63
- it('should inject model context for valid provider/model combo', async () => {
64
- const result = await service.createMessages({
65
- template: 'Model: {{model_id}}, Provider: {{provider_id}}',
66
- providerId: 'openai',
67
- modelId: 'gpt-4.1'
68
- });
69
- expect(result.modelContext).not.toBeNull();
70
- expect(result.modelContext?.model_id).toBe('gpt-4.1');
71
- expect(result.modelContext?.provider_id).toBe('openai');
72
- expect(result.messages).toEqual([
73
- { role: 'user', content: 'Model: gpt-4.1, Provider: openai' }
74
- ]);
75
- });
76
- it('should handle model without reasoning support', async () => {
77
- const result = await service.createMessages({
78
- template: 'Thinking available: {{thinking_available}}, enabled: {{thinking_enabled}}',
79
- presetId: 'openai-gpt-4.1-default'
80
- });
81
- expect(result.modelContext?.thinking_available).toBe(false);
82
- expect(result.modelContext?.thinking_enabled).toBe(false);
83
- });
84
- });
85
- describe('Complex scenarios', () => {
86
- it('should handle variables that inject role tags', async () => {
87
- const result = await service.createMessages({
88
- template: `
89
- <SYSTEM>Base system prompt</SYSTEM>
90
- {{extraMessages}}
91
- <USER>Final question</USER>
92
- `,
93
- variables: {
94
- extraMessages: '<USER>First question</USER>\n<ASSISTANT>First answer</ASSISTANT>'
95
- }
96
- });
97
- expect(result.messages).toEqual([
98
- { role: 'system', content: 'Base system prompt' },
99
- { role: 'user', content: 'First question' },
100
- { role: 'assistant', content: 'First answer' },
101
- { role: 'user', content: 'Final question' }
102
- ]);
103
- });
104
- it('should handle conditional role injection based on model context', async () => {
105
- const result = await service.createMessages({
106
- template: `
107
- {{ thinking_enabled ? '<SYSTEM>Think step-by-step before answering.</SYSTEM>' : '' }}
108
- <USER>Solve: {{problem}}</USER>
109
- `,
110
- variables: { problem: 'What is 15% of 240?' },
111
- presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking'
112
- });
113
- expect(result.messages).toEqual([
114
- { role: 'system', content: 'Think step-by-step before answering.' },
115
- { role: 'user', content: 'Solve: What is 15% of 240?' }
116
- ]);
117
- });
118
- it('should handle nested conditionals with model context', async () => {
119
- const result = await service.createMessages({
120
- template: `
121
- <SYSTEM>
122
- You are using {{ model_id || "no model" }}.
123
- {{ thinking_available ? 'You have reasoning capabilities.' : 'Standard model.' }}
124
- </SYSTEM>
125
- <USER>Hello</USER>
126
- `,
127
- providerId: 'anthropic',
128
- modelId: 'claude-3-5-haiku-20241022'
129
- });
130
- expect(result.messages[0].role).toBe('system');
131
- // Check that we have model context
132
- expect(result.modelContext).not.toBeNull();
133
- if (result.modelContext) {
134
- expect(result.modelContext.model_id).toBe('claude-3-5-haiku-20241022');
135
- expect(result.modelContext.thinking_available).toBe(false);
136
- }
137
- });
138
- });
139
- describe('Error handling', () => {
140
- it('should proceed without model context on invalid preset', async () => {
141
- const result = await service.createMessages({
142
- template: 'Has model context: {{model_id ? "yes" : "no"}}',
143
- presetId: 'invalid-preset-id'
144
- });
145
- expect(result.modelContext).toBeNull();
146
- expect(result.messages).toEqual([
147
- { role: 'user', content: 'Has model context: no' }
148
- ]);
149
- });
150
- it('should handle invalid template syntax gracefully', async () => {
151
- const result = await service.createMessages({
152
- template: 'Unclosed conditional: {{ if true',
153
- variables: {}
154
- });
155
- // The template engine doesn't throw errors for invalid syntax, it renders as-is
156
- expect(result.messages).toEqual([
157
- { role: 'user', content: 'Unclosed conditional: {{ if true' }
158
- ]);
159
- });
160
- it('should handle empty template', async () => {
161
- const result = await service.createMessages({
162
- template: ''
163
- });
164
- expect(result.messages).toEqual([]);
165
- expect(result.modelContext).toBeNull();
166
- });
167
- it('should handle whitespace-only template', async () => {
168
- const result = await service.createMessages({
169
- template: ' \n\t '
170
- });
171
- expect(result.messages).toEqual([]);
172
- expect(result.modelContext).toBeNull();
173
- });
174
- });
175
- describe('Integration with reasoning settings', () => {
176
- it('should handle reasoning settings in model context', async () => {
177
- const result = await service.createMessages({
178
- template: `
179
- Thinking enabled: {{thinking_enabled}}
180
- Thinking available: {{thinking_available}}
181
- Model: {{model_id}}
182
- `,
183
- presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking'
184
- });
185
- expect(result.modelContext?.thinking_enabled).toBe(true);
186
- expect(result.modelContext?.thinking_available).toBe(true);
187
- expect(result.messages[0].content).toContain('Thinking enabled: true');
188
- expect(result.messages[0].content).toContain('Thinking available: true');
189
- });
190
- it('should handle models with always-on reasoning', async () => {
191
- const result = await service.createMessages({
192
- template: 'Provider: {{provider_id}}, Model: {{model_id}}',
193
- providerId: 'gemini',
194
- modelId: 'gemini-2.5-pro'
195
- });
196
- expect(result.modelContext).not.toBeNull();
197
- expect(result.modelContext?.provider_id).toBe('gemini');
198
- expect(result.modelContext?.model_id).toBe('gemini-2.5-pro');
199
- });
200
- });
201
- describe('Variable precedence', () => {
202
- it('should allow user variables to override model context', async () => {
203
- const result = await service.createMessages({
204
- template: 'Model: {{model_id}}',
205
- variables: { model_id: 'user-override' },
206
- presetId: 'openai-gpt-4.1-default'
207
- });
208
- expect(result.messages).toEqual([
209
- { role: 'user', content: 'Model: user-override' }
210
- ]);
211
- });
212
- it('should merge variables correctly', async () => {
213
- const result = await service.createMessages({
214
- template: 'Model: {{model_id}}, Task: {{task}}, Thinking: {{thinking_enabled}}',
215
- variables: { task: 'code review' },
216
- presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking'
217
- });
218
- expect(result.messages[0].content).toBe('Model: claude-3-7-sonnet-20250219, Task: code review, Thinking: true');
219
- });
220
- });
221
- describe('Template metadata parsing', () => {
222
- it('should extract settings from META block', async () => {
223
- const result = await service.createMessages({
224
- template: `<META>
225
- {
226
- "settings": {
227
- "temperature": 0.9,
228
- "thinkingExtraction": { "enabled": true, "tag": "reasoning" }
229
- }
230
- }
231
- </META>
232
- <SYSTEM>You are a creative writer.</SYSTEM>
233
- <USER>Write a story about {{topic}}</USER>`,
234
- variables: { topic: 'a robot discovering music' }
235
- });
236
- expect(result.messages).toEqual([
237
- { role: 'system', content: 'You are a creative writer.' },
238
- { role: 'user', content: 'Write a story about a robot discovering music' }
239
- ]);
240
- expect(result.settings).toEqual({
241
- temperature: 0.9,
242
- thinkingExtraction: { enabled: true, tag: 'reasoning' }
243
- });
244
- });
245
- it('should return empty settings when no META block exists', async () => {
246
- const result = await service.createMessages({
247
- template: '<USER>Simple message</USER>'
248
- });
249
- expect(result.messages).toEqual([
250
- { role: 'user', content: 'Simple message' }
251
- ]);
252
- expect(result.settings).toEqual({});
253
- });
254
- it('should handle invalid settings in META block with warnings', async () => {
255
- const consoleWarnSpy = jest.spyOn(console, 'warn').mockImplementation();
256
- const result = await service.createMessages({
257
- template: `<META>
258
- {
259
- "settings": {
260
- "temperature": 3.0,
261
- "unknownSetting": "value",
262
- "maxTokens": -50
263
- }
264
- }
265
- </META>
266
- <USER>Test</USER>`
267
- });
268
- expect(result.messages).toEqual([
269
- { role: 'user', content: 'Test' }
270
- ]);
271
- expect(result.settings).toEqual({}); // All settings were invalid
272
- expect(consoleWarnSpy).toHaveBeenCalledWith(expect.stringContaining('Invalid temperature value'));
273
- expect(consoleWarnSpy).toHaveBeenCalledWith(expect.stringContaining('Unknown setting "unknownSetting"'));
274
- expect(consoleWarnSpy).toHaveBeenCalledWith(expect.stringContaining('Invalid maxTokens value'));
275
- consoleWarnSpy.mockRestore();
276
- });
277
- it('should work with model context and META settings', async () => {
278
- const result = await service.createMessages({
279
- template: `<META>
280
- {
281
- "settings": {
282
- "temperature": 0.7,
283
- "maxTokens": 2000
284
- }
285
- }
286
- </META>
287
- <SYSTEM>You are a {{ thinking_enabled ? "thoughtful" : "quick" }} assistant.</SYSTEM>
288
- <USER>Help me understand {{concept}}</USER>`,
289
- variables: { concept: 'recursion' },
290
- presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking'
291
- });
292
- expect(result.messages).toEqual([
293
- { role: 'system', content: 'You are a thoughtful assistant.' },
294
- { role: 'user', content: 'Help me understand recursion' }
295
- ]);
296
- expect(result.settings).toEqual({
297
- temperature: 0.7,
298
- maxTokens: 2000
299
- });
300
- expect(result.modelContext).not.toBeNull();
301
- expect(result.modelContext?.thinking_enabled).toBe(true);
302
- });
303
- it('should validate complex nested settings', async () => {
304
- const result = await service.createMessages({
305
- template: `<META>
306
- {
307
- "settings": {
308
- "reasoning": {
309
- "enabled": true,
310
- "effort": "high",
311
- "maxTokens": 5000
312
- },
313
- "stopSequences": ["\\n\\n", "END"],
314
- "frequencyPenalty": 0.5
315
- }
316
- }
317
- </META>
318
- <USER>Complex request</USER>`
319
- });
320
- expect(result.settings).toEqual({
321
- reasoning: {
322
- enabled: true,
323
- effort: 'high',
324
- maxTokens: 5000
325
- },
326
- stopSequences: ['\n\n', 'END'],
327
- frequencyPenalty: 0.5
328
- });
329
- });
330
- it('should handle invalid nested settings gracefully', async () => {
331
- const consoleWarnSpy = jest.spyOn(console, 'warn').mockImplementation();
332
- const result = await service.createMessages({
333
- template: `<META>
334
- {
335
- "settings": {
336
- "reasoning": {
337
- "enabled": "yes",
338
- "effort": "maximum",
339
- "maxTokens": -1000
340
- }
341
- }
342
- }
343
- </META>
344
- <USER>Test</USER>`
345
- });
346
- expect(result.settings).toEqual({}); // All fields were invalid, so empty object
347
- expect(consoleWarnSpy).toHaveBeenCalledWith(expect.stringContaining('Invalid reasoning.enabled'));
348
- expect(consoleWarnSpy).toHaveBeenCalledWith(expect.stringContaining('Invalid reasoning.effort'));
349
- expect(consoleWarnSpy).toHaveBeenCalledWith(expect.stringContaining('Invalid reasoning.maxTokens'));
350
- consoleWarnSpy.mockRestore();
351
- });
352
- it('should maintain backward compatibility for callers not using settings', async () => {
353
- // Old code that destructures without settings should still work
354
- const { messages, modelContext } = await service.createMessages({
355
- template: `<META>{"settings": {"temperature": 0.8}}</META><USER>Test</USER>`
356
- });
357
- expect(messages).toEqual([
358
- { role: 'user', content: 'Test' }
359
- ]);
360
- expect(modelContext).toBeNull();
361
- // settings field exists but old code doesn't need to know about it
362
- });
363
- });
364
- });