genai-lite 0.2.0 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. package/README.md +508 -30
  2. package/dist/config/presets.json +121 -17
  3. package/dist/index.d.ts +3 -3
  4. package/dist/index.js +4 -3
  5. package/dist/llm/LLMService.createMessages.test.d.ts +4 -0
  6. package/dist/llm/LLMService.createMessages.test.js +364 -0
  7. package/dist/llm/LLMService.d.ts +49 -47
  8. package/dist/llm/LLMService.js +208 -303
  9. package/dist/llm/LLMService.original.d.ts +147 -0
  10. package/dist/llm/LLMService.original.js +656 -0
  11. package/dist/llm/LLMService.prepareMessage.test.d.ts +1 -0
  12. package/dist/llm/LLMService.prepareMessage.test.js +303 -0
  13. package/dist/llm/LLMService.sendMessage.preset.test.d.ts +1 -0
  14. package/dist/llm/LLMService.sendMessage.preset.test.js +153 -0
  15. package/dist/llm/LLMService.test.js +275 -0
  16. package/dist/llm/clients/AnthropicClientAdapter.js +64 -10
  17. package/dist/llm/clients/AnthropicClientAdapter.test.js +11 -1
  18. package/dist/llm/clients/GeminiClientAdapter.js +70 -11
  19. package/dist/llm/clients/GeminiClientAdapter.test.js +125 -1
  20. package/dist/llm/clients/MockClientAdapter.js +9 -3
  21. package/dist/llm/clients/MockClientAdapter.test.js +11 -1
  22. package/dist/llm/clients/OpenAIClientAdapter.js +26 -10
  23. package/dist/llm/clients/OpenAIClientAdapter.test.js +11 -1
  24. package/dist/llm/config.js +117 -2
  25. package/dist/llm/config.test.js +17 -0
  26. package/dist/llm/services/AdapterRegistry.d.ts +59 -0
  27. package/dist/llm/services/AdapterRegistry.js +113 -0
  28. package/dist/llm/services/AdapterRegistry.test.d.ts +1 -0
  29. package/dist/llm/services/AdapterRegistry.test.js +239 -0
  30. package/dist/llm/services/ModelResolver.d.ts +35 -0
  31. package/dist/llm/services/ModelResolver.js +116 -0
  32. package/dist/llm/services/ModelResolver.test.d.ts +1 -0
  33. package/dist/llm/services/ModelResolver.test.js +158 -0
  34. package/dist/llm/services/PresetManager.d.ts +27 -0
  35. package/dist/llm/services/PresetManager.js +50 -0
  36. package/dist/llm/services/PresetManager.test.d.ts +1 -0
  37. package/dist/llm/services/PresetManager.test.js +210 -0
  38. package/dist/llm/services/RequestValidator.d.ts +31 -0
  39. package/dist/llm/services/RequestValidator.js +122 -0
  40. package/dist/llm/services/RequestValidator.test.d.ts +1 -0
  41. package/dist/llm/services/RequestValidator.test.js +159 -0
  42. package/dist/llm/services/SettingsManager.d.ts +32 -0
  43. package/dist/llm/services/SettingsManager.js +223 -0
  44. package/dist/llm/services/SettingsManager.test.d.ts +1 -0
  45. package/dist/llm/services/SettingsManager.test.js +266 -0
  46. package/dist/llm/types.d.ts +107 -0
  47. package/dist/prompting/builder.d.ts +4 -0
  48. package/dist/prompting/builder.js +12 -61
  49. package/dist/prompting/content.js +3 -9
  50. package/dist/prompting/index.d.ts +2 -3
  51. package/dist/prompting/index.js +4 -5
  52. package/dist/prompting/parser.d.ts +80 -0
  53. package/dist/prompting/parser.js +133 -0
  54. package/dist/prompting/parser.test.js +348 -0
  55. package/dist/prompting/template.d.ts +8 -0
  56. package/dist/prompting/template.js +89 -6
  57. package/dist/prompting/template.test.js +116 -0
  58. package/package.json +3 -2
  59. package/src/config/presets.json +122 -17
@@ -6,7 +6,23 @@
6
6
  "providerId": "anthropic",
7
7
  "modelId": "claude-sonnet-4-20250514",
8
8
  "settings": {
9
- "temperature": 0.3
9
+ "temperature": 0.7,
10
+ "reasoning": {
11
+ "enabled": false
12
+ }
13
+ }
14
+ },
15
+ {
16
+ "id": "anthropic-claude-sonnet-4-20250514-thinking",
17
+ "displayName": "Anthropic - Claude Sonnet 4 (Thinking)",
18
+ "description": "Claude Sonnet 4 with reasoning enabled for step-by-step thinking.",
19
+ "providerId": "anthropic",
20
+ "modelId": "claude-sonnet-4-20250514",
21
+ "settings": {
22
+ "temperature": 0.7,
23
+ "reasoning": {
24
+ "enabled": true
25
+ }
10
26
  }
11
27
  },
12
28
  {
@@ -16,7 +32,23 @@
16
32
  "providerId": "anthropic",
17
33
  "modelId": "claude-opus-4-20250514",
18
34
  "settings": {
19
- "temperature": 0.3
35
+ "temperature": 0.7,
36
+ "reasoning": {
37
+ "enabled": false
38
+ }
39
+ }
40
+ },
41
+ {
42
+ "id": "anthropic-claude-opus-4-20250514-thinking",
43
+ "displayName": "Anthropic - Claude Opus 4 (Thinking)",
44
+ "description": "Claude Opus 4 with reasoning enabled for complex problem solving.",
45
+ "providerId": "anthropic",
46
+ "modelId": "claude-opus-4-20250514",
47
+ "settings": {
48
+ "temperature": 0.7,
49
+ "reasoning": {
50
+ "enabled": true
51
+ }
20
52
  }
21
53
  },
22
54
  {
@@ -26,7 +58,23 @@
26
58
  "providerId": "anthropic",
27
59
  "modelId": "claude-3-7-sonnet-20250219",
28
60
  "settings": {
29
- "temperature": 0.3
61
+ "temperature": 0.7,
62
+ "reasoning": {
63
+ "enabled": false
64
+ }
65
+ }
66
+ },
67
+ {
68
+ "id": "anthropic-claude-3-7-sonnet-20250219-thinking",
69
+ "displayName": "Anthropic - Claude 3.7 Sonnet (Thinking)",
70
+ "description": "Claude 3.7 Sonnet with full reasoning output for detailed analysis.",
71
+ "providerId": "anthropic",
72
+ "modelId": "claude-3-7-sonnet-20250219",
73
+ "settings": {
74
+ "temperature": 0.7,
75
+ "reasoning": {
76
+ "enabled": true
77
+ }
30
78
  }
31
79
  },
32
80
  {
@@ -36,7 +84,7 @@
36
84
  "providerId": "anthropic",
37
85
  "modelId": "claude-3-5-sonnet-20241022",
38
86
  "settings": {
39
- "temperature": 0.3
87
+ "temperature": 0.7
40
88
  }
41
89
  },
42
90
  {
@@ -46,7 +94,7 @@
46
94
  "providerId": "anthropic",
47
95
  "modelId": "claude-3-5-haiku-20241022",
48
96
  "settings": {
49
- "temperature": 0.3
97
+ "temperature": 0.7
50
98
  }
51
99
  },
52
100
  {
@@ -56,7 +104,7 @@
56
104
  "providerId": "gemini",
57
105
  "modelId": "gemini-2.5-pro",
58
106
  "settings": {
59
- "temperature": 0.3,
107
+ "temperature": 0.7,
60
108
  "geminiSafetySettings": [
61
109
  { "category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE" },
62
110
  {
@@ -78,7 +126,7 @@
78
126
  "providerId": "gemini",
79
127
  "modelId": "gemini-2.5-flash",
80
128
  "settings": {
81
- "temperature": 0.3,
129
+ "temperature": 0.7,
82
130
  "geminiSafetySettings": [
83
131
  { "category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE" },
84
132
  {
@@ -90,7 +138,35 @@
90
138
  "threshold": "BLOCK_NONE"
91
139
  },
92
140
  { "category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE" }
93
- ]
141
+ ],
142
+ "reasoning": {
143
+ "enabled": false
144
+ }
145
+ }
146
+ },
147
+ {
148
+ "id": "google-gemini-2.5-flash-thinking",
149
+ "displayName": "Google - Gemini 2.5 Flash (Thinking)",
150
+ "description": "Gemini 2.5 Flash with dynamic reasoning for adaptive problem solving.",
151
+ "providerId": "gemini",
152
+ "modelId": "gemini-2.5-flash",
153
+ "settings": {
154
+ "temperature": 0.7,
155
+ "geminiSafetySettings": [
156
+ { "category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE" },
157
+ {
158
+ "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
159
+ "threshold": "BLOCK_NONE"
160
+ },
161
+ {
162
+ "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
163
+ "threshold": "BLOCK_NONE"
164
+ },
165
+ { "category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE" }
166
+ ],
167
+ "reasoning": {
168
+ "enabled": true
169
+ }
94
170
  }
95
171
  },
96
172
  {
@@ -100,7 +176,7 @@
100
176
  "providerId": "gemini",
101
177
  "modelId": "gemini-2.5-flash-lite-preview-06-17",
102
178
  "settings": {
103
- "temperature": 0.3,
179
+ "temperature": 0.7,
104
180
  "geminiSafetySettings": [
105
181
  { "category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE" },
106
182
  {
@@ -112,7 +188,35 @@
112
188
  "threshold": "BLOCK_NONE"
113
189
  },
114
190
  { "category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE" }
115
- ]
191
+ ],
192
+ "reasoning": {
193
+ "enabled": false
194
+ }
195
+ }
196
+ },
197
+ {
198
+ "id": "google-gemini-2.5-flash-lite-preview-thinking",
199
+ "displayName": "Google - Gemini 2.5 Flash-Lite Preview (Thinking)",
200
+ "description": "Gemini 2.5 Flash-Lite with dynamic reasoning for efficient thinking.",
201
+ "providerId": "gemini",
202
+ "modelId": "gemini-2.5-flash-lite-preview-06-17",
203
+ "settings": {
204
+ "temperature": 0.7,
205
+ "geminiSafetySettings": [
206
+ { "category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE" },
207
+ {
208
+ "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
209
+ "threshold": "BLOCK_NONE"
210
+ },
211
+ {
212
+ "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
213
+ "threshold": "BLOCK_NONE"
214
+ },
215
+ { "category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE" }
216
+ ],
217
+ "reasoning": {
218
+ "enabled": true
219
+ }
116
220
  }
117
221
  },
118
222
  {
@@ -122,7 +226,7 @@
122
226
  "providerId": "gemini",
123
227
  "modelId": "gemini-2.0-flash",
124
228
  "settings": {
125
- "temperature": 0.3,
229
+ "temperature": 0.7,
126
230
  "geminiSafetySettings": [
127
231
  { "category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE" },
128
232
  {
@@ -144,7 +248,7 @@
144
248
  "providerId": "gemini",
145
249
  "modelId": "gemini-2.0-flash-lite",
146
250
  "settings": {
147
- "temperature": 0.3,
251
+ "temperature": 0.7,
148
252
  "geminiSafetySettings": [
149
253
  { "category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE" },
150
254
  {
@@ -176,7 +280,7 @@
176
280
  "providerId": "openai",
177
281
  "modelId": "gpt-4.1",
178
282
  "settings": {
179
- "temperature": 0.3
283
+ "temperature": 0.7
180
284
  }
181
285
  },
182
286
  {
@@ -186,7 +290,7 @@
186
290
  "providerId": "openai",
187
291
  "modelId": "gpt-4.1-mini",
188
292
  "settings": {
189
- "temperature": 0.3
293
+ "temperature": 0.7
190
294
  }
191
295
  },
192
296
  {
@@ -196,7 +300,7 @@
196
300
  "providerId": "openai",
197
301
  "modelId": "gpt-4.1-nano",
198
302
  "settings": {
199
- "temperature": 0.3
303
+ "temperature": 0.7
200
304
  }
201
305
  },
202
306
  {
@@ -206,7 +310,7 @@
206
310
  "providerId": "mistral",
207
311
  "modelId": "codestral-2501",
208
312
  "settings": {
209
- "temperature": 0.3
313
+ "temperature": 0.7
210
314
  }
211
315
  },
212
316
  {
@@ -216,7 +320,7 @@
216
320
  "providerId": "mistral",
217
321
  "modelId": "devstral-small-2505",
218
322
  "settings": {
219
- "temperature": 0.3
323
+ "temperature": 0.7
220
324
  }
221
325
  }
222
326
  ]
package/dist/index.d.ts CHANGED
@@ -1,11 +1,11 @@
1
1
  export type { ApiKeyProvider } from "./types";
2
2
  export { LLMService } from "./llm/LLMService";
3
- export type { LLMServiceOptions, PresetMode } from "./llm/LLMService";
3
+ export type { LLMServiceOptions, PresetMode, CreateMessagesResult } from "./llm/LLMService";
4
4
  export type { ModelPreset } from "./types/presets";
5
5
  export * from "./llm/types";
6
6
  export * from "./llm/clients/types";
7
7
  export { fromEnvironment } from "./providers/fromEnvironment";
8
8
  export { renderTemplate } from "./prompting/template";
9
9
  export { countTokens, getSmartPreview, extractRandomVariables } from "./prompting/content";
10
- export { buildMessagesFromTemplate } from "./prompting/builder";
11
- export { parseStructuredContent } from "./prompting/parser";
10
+ export { parseStructuredContent, parseRoleTags, extractInitialTaggedContent, parseTemplateWithMetadata } from "./prompting/parser";
11
+ export type { TemplateMetadata } from "./prompting/parser";
package/dist/index.js CHANGED
@@ -14,7 +14,7 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
14
14
  for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
15
15
  };
16
16
  Object.defineProperty(exports, "__esModule", { value: true });
17
- exports.parseStructuredContent = exports.buildMessagesFromTemplate = exports.extractRandomVariables = exports.getSmartPreview = exports.countTokens = exports.renderTemplate = exports.fromEnvironment = exports.LLMService = void 0;
17
+ exports.parseTemplateWithMetadata = exports.extractInitialTaggedContent = exports.parseRoleTags = exports.parseStructuredContent = exports.extractRandomVariables = exports.getSmartPreview = exports.countTokens = exports.renderTemplate = exports.fromEnvironment = exports.LLMService = void 0;
18
18
  // --- LLM Service ---
19
19
  var LLMService_1 = require("./llm/LLMService");
20
20
  Object.defineProperty(exports, "LLMService", { enumerable: true, get: function () { return LLMService_1.LLMService; } });
@@ -32,7 +32,8 @@ var content_1 = require("./prompting/content");
32
32
  Object.defineProperty(exports, "countTokens", { enumerable: true, get: function () { return content_1.countTokens; } });
33
33
  Object.defineProperty(exports, "getSmartPreview", { enumerable: true, get: function () { return content_1.getSmartPreview; } });
34
34
  Object.defineProperty(exports, "extractRandomVariables", { enumerable: true, get: function () { return content_1.extractRandomVariables; } });
35
- var builder_1 = require("./prompting/builder");
36
- Object.defineProperty(exports, "buildMessagesFromTemplate", { enumerable: true, get: function () { return builder_1.buildMessagesFromTemplate; } });
37
35
  var parser_1 = require("./prompting/parser");
38
36
  Object.defineProperty(exports, "parseStructuredContent", { enumerable: true, get: function () { return parser_1.parseStructuredContent; } });
37
+ Object.defineProperty(exports, "parseRoleTags", { enumerable: true, get: function () { return parser_1.parseRoleTags; } });
38
+ Object.defineProperty(exports, "extractInitialTaggedContent", { enumerable: true, get: function () { return parser_1.extractInitialTaggedContent; } });
39
+ Object.defineProperty(exports, "parseTemplateWithMetadata", { enumerable: true, get: function () { return parser_1.parseTemplateWithMetadata; } });
@@ -0,0 +1,4 @@
1
+ /**
2
+ * Tests for LLMService.createMessages method
3
+ */
4
+ export {};
@@ -0,0 +1,364 @@
1
+ "use strict";
2
+ /**
3
+ * Tests for LLMService.createMessages method
4
+ */
5
+ Object.defineProperty(exports, "__esModule", { value: true });
6
+ const LLMService_1 = require("./LLMService");
7
+ // Create a mock API key provider
8
+ const mockApiKeyProvider = async () => 'test-api-key';
9
+ describe('LLMService.createMessages', () => {
10
+ let service;
11
+ beforeEach(() => {
12
+ service = new LLMService_1.LLMService(mockApiKeyProvider);
13
+ });
14
+ describe('Basic template parsing', () => {
15
+ it('should parse simple template without model context', async () => {
16
+ const result = await service.createMessages({
17
+ template: 'Hello, how can I help you?'
18
+ });
19
+ expect(result.messages).toEqual([
20
+ { role: 'user', content: 'Hello, how can I help you?' }
21
+ ]);
22
+ expect(result.modelContext).toBeNull();
23
+ });
24
+ it('should parse multi-turn template without model context', async () => {
25
+ const result = await service.createMessages({
26
+ template: `
27
+ <SYSTEM>You are a helpful assistant.</SYSTEM>
28
+ <USER>Hello</USER>
29
+ <ASSISTANT>Hi! How can I help you today?</ASSISTANT>
30
+ <USER>Can you explain {{topic}}?</USER>
31
+ `,
32
+ variables: { topic: 'promises in JavaScript' }
33
+ });
34
+ expect(result.messages).toEqual([
35
+ { role: 'system', content: 'You are a helpful assistant.' },
36
+ { role: 'user', content: 'Hello' },
37
+ { role: 'assistant', content: 'Hi! How can I help you today?' },
38
+ { role: 'user', content: 'Can you explain promises in JavaScript?' }
39
+ ]);
40
+ expect(result.modelContext).toBeNull();
41
+ });
42
+ });
43
+ describe('Model-aware templates', () => {
44
+ it('should inject model context for valid preset', async () => {
45
+ const result = await service.createMessages({
46
+ template: `
47
+ <SYSTEM>You are a {{ thinking_enabled ? "thoughtful" : "standard" }} assistant.</SYSTEM>
48
+ <USER>Help me understand {{concept}}</USER>
49
+ `,
50
+ variables: { concept: 'recursion' },
51
+ presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking'
52
+ });
53
+ expect(result.modelContext).not.toBeNull();
54
+ expect(result.modelContext?.thinking_enabled).toBe(true);
55
+ expect(result.modelContext?.thinking_available).toBe(true);
56
+ expect(result.modelContext?.model_id).toBe('claude-3-7-sonnet-20250219');
57
+ expect(result.modelContext?.provider_id).toBe('anthropic');
58
+ expect(result.messages).toEqual([
59
+ { role: 'system', content: 'You are a thoughtful assistant.' },
60
+ { role: 'user', content: 'Help me understand recursion' }
61
+ ]);
62
+ });
63
+ it('should inject model context for valid provider/model combo', async () => {
64
+ const result = await service.createMessages({
65
+ template: 'Model: {{model_id}}, Provider: {{provider_id}}',
66
+ providerId: 'openai',
67
+ modelId: 'gpt-4.1'
68
+ });
69
+ expect(result.modelContext).not.toBeNull();
70
+ expect(result.modelContext?.model_id).toBe('gpt-4.1');
71
+ expect(result.modelContext?.provider_id).toBe('openai');
72
+ expect(result.messages).toEqual([
73
+ { role: 'user', content: 'Model: gpt-4.1, Provider: openai' }
74
+ ]);
75
+ });
76
+ it('should handle model without reasoning support', async () => {
77
+ const result = await service.createMessages({
78
+ template: 'Thinking available: {{thinking_available}}, enabled: {{thinking_enabled}}',
79
+ presetId: 'openai-gpt-4.1-default'
80
+ });
81
+ expect(result.modelContext?.thinking_available).toBe(false);
82
+ expect(result.modelContext?.thinking_enabled).toBe(false);
83
+ });
84
+ });
85
+ describe('Complex scenarios', () => {
86
+ it('should handle variables that inject role tags', async () => {
87
+ const result = await service.createMessages({
88
+ template: `
89
+ <SYSTEM>Base system prompt</SYSTEM>
90
+ {{extraMessages}}
91
+ <USER>Final question</USER>
92
+ `,
93
+ variables: {
94
+ extraMessages: '<USER>First question</USER>\n<ASSISTANT>First answer</ASSISTANT>'
95
+ }
96
+ });
97
+ expect(result.messages).toEqual([
98
+ { role: 'system', content: 'Base system prompt' },
99
+ { role: 'user', content: 'First question' },
100
+ { role: 'assistant', content: 'First answer' },
101
+ { role: 'user', content: 'Final question' }
102
+ ]);
103
+ });
104
+ it('should handle conditional role injection based on model context', async () => {
105
+ const result = await service.createMessages({
106
+ template: `
107
+ {{ thinking_enabled ? '<SYSTEM>Think step-by-step before answering.</SYSTEM>' : '' }}
108
+ <USER>Solve: {{problem}}</USER>
109
+ `,
110
+ variables: { problem: 'What is 15% of 240?' },
111
+ presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking'
112
+ });
113
+ expect(result.messages).toEqual([
114
+ { role: 'system', content: 'Think step-by-step before answering.' },
115
+ { role: 'user', content: 'Solve: What is 15% of 240?' }
116
+ ]);
117
+ });
118
+ it('should handle nested conditionals with model context', async () => {
119
+ const result = await service.createMessages({
120
+ template: `
121
+ <SYSTEM>
122
+ You are using {{ model_id || "no model" }}.
123
+ {{ thinking_available ? 'You have reasoning capabilities.' : 'Standard model.' }}
124
+ </SYSTEM>
125
+ <USER>Hello</USER>
126
+ `,
127
+ providerId: 'anthropic',
128
+ modelId: 'claude-3-5-haiku-20241022'
129
+ });
130
+ expect(result.messages[0].role).toBe('system');
131
+ // Check that we have model context
132
+ expect(result.modelContext).not.toBeNull();
133
+ if (result.modelContext) {
134
+ expect(result.modelContext.model_id).toBe('claude-3-5-haiku-20241022');
135
+ expect(result.modelContext.thinking_available).toBe(false);
136
+ }
137
+ });
138
+ });
139
+ describe('Error handling', () => {
140
+ it('should proceed without model context on invalid preset', async () => {
141
+ const result = await service.createMessages({
142
+ template: 'Has model context: {{model_id ? "yes" : "no"}}',
143
+ presetId: 'invalid-preset-id'
144
+ });
145
+ expect(result.modelContext).toBeNull();
146
+ expect(result.messages).toEqual([
147
+ { role: 'user', content: 'Has model context: no' }
148
+ ]);
149
+ });
150
+ it('should handle invalid template syntax gracefully', async () => {
151
+ const result = await service.createMessages({
152
+ template: 'Unclosed conditional: {{ if true',
153
+ variables: {}
154
+ });
155
+ // The template engine doesn't throw errors for invalid syntax, it renders as-is
156
+ expect(result.messages).toEqual([
157
+ { role: 'user', content: 'Unclosed conditional: {{ if true' }
158
+ ]);
159
+ });
160
+ it('should handle empty template', async () => {
161
+ const result = await service.createMessages({
162
+ template: ''
163
+ });
164
+ expect(result.messages).toEqual([]);
165
+ expect(result.modelContext).toBeNull();
166
+ });
167
+ it('should handle whitespace-only template', async () => {
168
+ const result = await service.createMessages({
169
+ template: ' \n\t '
170
+ });
171
+ expect(result.messages).toEqual([]);
172
+ expect(result.modelContext).toBeNull();
173
+ });
174
+ });
175
+ describe('Integration with reasoning settings', () => {
176
+ it('should handle reasoning settings in model context', async () => {
177
+ const result = await service.createMessages({
178
+ template: `
179
+ Thinking enabled: {{thinking_enabled}}
180
+ Thinking available: {{thinking_available}}
181
+ Model: {{model_id}}
182
+ `,
183
+ presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking'
184
+ });
185
+ expect(result.modelContext?.thinking_enabled).toBe(true);
186
+ expect(result.modelContext?.thinking_available).toBe(true);
187
+ expect(result.messages[0].content).toContain('Thinking enabled: true');
188
+ expect(result.messages[0].content).toContain('Thinking available: true');
189
+ });
190
+ it('should handle models with always-on reasoning', async () => {
191
+ const result = await service.createMessages({
192
+ template: 'Provider: {{provider_id}}, Model: {{model_id}}',
193
+ providerId: 'gemini',
194
+ modelId: 'gemini-2.5-pro'
195
+ });
196
+ expect(result.modelContext).not.toBeNull();
197
+ expect(result.modelContext?.provider_id).toBe('gemini');
198
+ expect(result.modelContext?.model_id).toBe('gemini-2.5-pro');
199
+ });
200
+ });
201
+ describe('Variable precedence', () => {
202
+ it('should allow user variables to override model context', async () => {
203
+ const result = await service.createMessages({
204
+ template: 'Model: {{model_id}}',
205
+ variables: { model_id: 'user-override' },
206
+ presetId: 'openai-gpt-4.1-default'
207
+ });
208
+ expect(result.messages).toEqual([
209
+ { role: 'user', content: 'Model: user-override' }
210
+ ]);
211
+ });
212
+ it('should merge variables correctly', async () => {
213
+ const result = await service.createMessages({
214
+ template: 'Model: {{model_id}}, Task: {{task}}, Thinking: {{thinking_enabled}}',
215
+ variables: { task: 'code review' },
216
+ presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking'
217
+ });
218
+ expect(result.messages[0].content).toBe('Model: claude-3-7-sonnet-20250219, Task: code review, Thinking: true');
219
+ });
220
+ });
221
+ describe('Template metadata parsing', () => {
222
+ it('should extract settings from META block', async () => {
223
+ const result = await service.createMessages({
224
+ template: `<META>
225
+ {
226
+ "settings": {
227
+ "temperature": 0.9,
228
+ "thinkingExtraction": { "enabled": true, "tag": "reasoning" }
229
+ }
230
+ }
231
+ </META>
232
+ <SYSTEM>You are a creative writer.</SYSTEM>
233
+ <USER>Write a story about {{topic}}</USER>`,
234
+ variables: { topic: 'a robot discovering music' }
235
+ });
236
+ expect(result.messages).toEqual([
237
+ { role: 'system', content: 'You are a creative writer.' },
238
+ { role: 'user', content: 'Write a story about a robot discovering music' }
239
+ ]);
240
+ expect(result.settings).toEqual({
241
+ temperature: 0.9,
242
+ thinkingExtraction: { enabled: true, tag: 'reasoning' }
243
+ });
244
+ });
245
+ it('should return empty settings when no META block exists', async () => {
246
+ const result = await service.createMessages({
247
+ template: '<USER>Simple message</USER>'
248
+ });
249
+ expect(result.messages).toEqual([
250
+ { role: 'user', content: 'Simple message' }
251
+ ]);
252
+ expect(result.settings).toEqual({});
253
+ });
254
+ it('should handle invalid settings in META block with warnings', async () => {
255
+ const consoleWarnSpy = jest.spyOn(console, 'warn').mockImplementation();
256
+ const result = await service.createMessages({
257
+ template: `<META>
258
+ {
259
+ "settings": {
260
+ "temperature": 3.0,
261
+ "unknownSetting": "value",
262
+ "maxTokens": -50
263
+ }
264
+ }
265
+ </META>
266
+ <USER>Test</USER>`
267
+ });
268
+ expect(result.messages).toEqual([
269
+ { role: 'user', content: 'Test' }
270
+ ]);
271
+ expect(result.settings).toEqual({}); // All settings were invalid
272
+ expect(consoleWarnSpy).toHaveBeenCalledWith(expect.stringContaining('Invalid temperature value'));
273
+ expect(consoleWarnSpy).toHaveBeenCalledWith(expect.stringContaining('Unknown setting "unknownSetting"'));
274
+ expect(consoleWarnSpy).toHaveBeenCalledWith(expect.stringContaining('Invalid maxTokens value'));
275
+ consoleWarnSpy.mockRestore();
276
+ });
277
+ it('should work with model context and META settings', async () => {
278
+ const result = await service.createMessages({
279
+ template: `<META>
280
+ {
281
+ "settings": {
282
+ "temperature": 0.7,
283
+ "maxTokens": 2000
284
+ }
285
+ }
286
+ </META>
287
+ <SYSTEM>You are a {{ thinking_enabled ? "thoughtful" : "quick" }} assistant.</SYSTEM>
288
+ <USER>Help me understand {{concept}}</USER>`,
289
+ variables: { concept: 'recursion' },
290
+ presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking'
291
+ });
292
+ expect(result.messages).toEqual([
293
+ { role: 'system', content: 'You are a thoughtful assistant.' },
294
+ { role: 'user', content: 'Help me understand recursion' }
295
+ ]);
296
+ expect(result.settings).toEqual({
297
+ temperature: 0.7,
298
+ maxTokens: 2000
299
+ });
300
+ expect(result.modelContext).not.toBeNull();
301
+ expect(result.modelContext?.thinking_enabled).toBe(true);
302
+ });
303
+ it('should validate complex nested settings', async () => {
304
+ const result = await service.createMessages({
305
+ template: `<META>
306
+ {
307
+ "settings": {
308
+ "reasoning": {
309
+ "enabled": true,
310
+ "effort": "high",
311
+ "maxTokens": 5000
312
+ },
313
+ "stopSequences": ["\\n\\n", "END"],
314
+ "frequencyPenalty": 0.5
315
+ }
316
+ }
317
+ </META>
318
+ <USER>Complex request</USER>`
319
+ });
320
+ expect(result.settings).toEqual({
321
+ reasoning: {
322
+ enabled: true,
323
+ effort: 'high',
324
+ maxTokens: 5000
325
+ },
326
+ stopSequences: ['\n\n', 'END'],
327
+ frequencyPenalty: 0.5
328
+ });
329
+ });
330
+ it('should handle invalid nested settings gracefully', async () => {
331
+ const consoleWarnSpy = jest.spyOn(console, 'warn').mockImplementation();
332
+ const result = await service.createMessages({
333
+ template: `<META>
334
+ {
335
+ "settings": {
336
+ "reasoning": {
337
+ "enabled": "yes",
338
+ "effort": "maximum",
339
+ "maxTokens": -1000
340
+ }
341
+ }
342
+ }
343
+ </META>
344
+ <USER>Test</USER>`
345
+ });
346
+ expect(result.settings).toEqual({}); // All fields were invalid, so empty object
347
+ expect(consoleWarnSpy).toHaveBeenCalledWith(expect.stringContaining('Invalid reasoning.enabled'));
348
+ expect(consoleWarnSpy).toHaveBeenCalledWith(expect.stringContaining('Invalid reasoning.effort'));
349
+ expect(consoleWarnSpy).toHaveBeenCalledWith(expect.stringContaining('Invalid reasoning.maxTokens'));
350
+ consoleWarnSpy.mockRestore();
351
+ });
352
+ it('should maintain backward compatibility for callers not using settings', async () => {
353
+ // Old code that destructures without settings should still work
354
+ const { messages, modelContext } = await service.createMessages({
355
+ template: `<META>{"settings": {"temperature": 0.8}}</META><USER>Test</USER>`
356
+ });
357
+ expect(messages).toEqual([
358
+ { role: 'user', content: 'Test' }
359
+ ]);
360
+ expect(modelContext).toBeNull();
361
+ // settings field exists but old code doesn't need to know about it
362
+ });
363
+ });
364
+ });