genai-lite 0.1.4 → 0.2.1

Files changed (46)
  1. package/README.md +289 -12
  2. package/dist/config/presets.json +121 -17
  3. package/dist/index.d.ts +4 -1
  4. package/dist/index.js +11 -3
  5. package/dist/llm/LLMService.d.ts +39 -2
  6. package/dist/llm/LLMService.js +291 -78
  7. package/dist/llm/LLMService.prepareMessage.test.js +303 -0
  8. package/dist/llm/LLMService.sendMessage.preset.test.js +153 -0
  9. package/dist/llm/LLMService.test.js +83 -0
  10. package/dist/llm/clients/AnthropicClientAdapter.js +64 -10
  11. package/dist/llm/clients/AnthropicClientAdapter.test.js +7 -1
  12. package/dist/llm/clients/GeminiClientAdapter.js +70 -11
  13. package/dist/llm/clients/GeminiClientAdapter.test.js +121 -1
  14. package/dist/llm/clients/MockClientAdapter.test.js +7 -1
  15. package/dist/llm/clients/OpenAIClientAdapter.js +26 -10
  16. package/dist/llm/clients/OpenAIClientAdapter.test.js +7 -1
  17. package/dist/llm/config.js +112 -2
  18. package/dist/llm/config.test.js +17 -0
  19. package/dist/llm/types.d.ts +106 -0
  20. package/dist/prompting/builder.d.ts +34 -0
  21. package/dist/prompting/builder.js +112 -0
  22. package/dist/prompting/builder.test.d.ts +4 -0
  23. package/dist/prompting/builder.test.js +109 -0
  24. package/dist/prompting/content.d.ts +57 -0
  25. package/dist/prompting/content.js +146 -0
  26. package/dist/prompting/content.test.d.ts +4 -0
  27. package/dist/prompting/content.test.js +212 -0
  28. package/dist/prompting/index.d.ts +13 -0
  29. package/dist/prompting/index.js +26 -0
  30. package/dist/prompting/parser.d.ts +41 -0
  31. package/dist/prompting/parser.js +56 -0
  32. package/dist/prompting/parser.test.d.ts +4 -0
  33. package/dist/prompting/parser.test.js +116 -0
  34. package/dist/{utils/templateEngine.d.ts → prompting/template.d.ts} +7 -0
  35. package/dist/{utils/templateEngine.js → prompting/template.js} +7 -0
  36. package/dist/prompting/template.test.d.ts +1 -0
  37. package/dist/{utils/templateEngine.test.js → prompting/template.test.js} +30 -30
  38. package/package.json +7 -6
  39. package/src/config/presets.json +122 -17
  40. package/dist/utils/index.d.ts +0 -2
  41. package/dist/utils/index.js +0 -18
  42. package/dist/utils/prompt.d.ts +0 -6
  43. package/dist/utils/prompt.js +0 -55
  44. package/dist/utils/prompt.test.js +0 -115
  45. package/dist/{utils/prompt.test.d.ts → llm/LLMService.prepareMessage.test.d.ts} +0 -0
  46. package/dist/{utils/templateEngine.test.d.ts → llm/LLMService.sendMessage.preset.test.d.ts} +0 -0
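
The structural headline of this release is visible in the rename and removal entries above: the old `utils` module becomes a `prompting` subpath (items 34-37), and the remaining `dist/utils` files are removed (items 40-48). As orientation before the diffs below, here is a minimal sketch of the consumer-side change; it uses only import paths and export names that appear in the README and `index.d.ts` diffs in this release, though combining them into a single import statement is an illustrative assumption:

```typescript
// 0.1.4 (subpath removed in 0.2.1):
// import { renderTemplate, countTokens, getSmartPreview } from 'genai-lite/utils';

// 0.2.1: the same helpers plus the builder/parser utilities, now under `prompting`:
import {
  renderTemplate,            // template engine (utils/templateEngine → prompting/template)
  countTokens,               // token counting via tiktoken
  getSmartPreview,           // context-preserving previews of large text
  extractRandomVariables,    // shuffled few-shot example extraction
  buildMessagesFromTemplate, // role-tagged templates → LLMMessage[]
  parseStructuredContent,    // tag-based parsing of LLM responses
} from 'genai-lite/prompting';
```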
package/README.md CHANGED
@@ -111,6 +111,16 @@ const llmService = new LLMService(myKeyProvider);
  - `codestral-2501` - Specialized for code generation
  - `devstral-small-2505` - Compact development-focused model
 
+ ### Models with Reasoning Support
+
+ Some models include advanced reasoning/thinking capabilities that enhance their problem-solving abilities:
+
+ - **Anthropic**: Claude Sonnet 4, Claude Opus 4, Claude 3.7 Sonnet
+ - **Google Gemini**: Gemini 2.5 Pro (always on), Gemini 2.5 Flash, Gemini 2.5 Flash-Lite Preview
+ - **OpenAI**: o4-mini (always on)
+
+ See the [Reasoning Mode](#reasoning-mode) section for usage details.
+
  ## Advanced Usage
 
  ### Custom Settings
@@ -129,6 +139,68 @@ const response = await llmService.sendMessage({
  });
  ```
 
+ ### Reasoning Mode
+
+ Enable advanced reasoning capabilities for supported models to get step-by-step thinking and improved problem-solving:
+
+ ```typescript
+ // Enable reasoning with automatic token budget
+ const response = await llmService.sendMessage({
+   providerId: 'gemini',
+   modelId: 'gemini-2.5-flash',
+   messages: [{ role: 'user', content: 'Solve this step by step: If a train travels 120km in 2 hours, what is its speed in m/s?' }],
+   settings: {
+     reasoning: {
+       enabled: true // Let the model decide how much thinking to do
+     }
+   }
+ });
+
+ // Use effort levels for quick control
+ const response = await llmService.sendMessage({
+   providerId: 'anthropic',
+   modelId: 'claude-3-7-sonnet-20250219',
+   messages: [{ role: 'user', content: 'Analyze this complex problem...' }],
+   settings: {
+     reasoning: {
+       enabled: true,
+       effort: 'high' // 'low', 'medium', or 'high'
+     }
+   }
+ });
+
+ // Set specific token budget for reasoning
+ const response = await llmService.sendMessage({
+   providerId: 'gemini',
+   modelId: 'gemini-2.5-flash-lite-preview-06-17',
+   messages: [{ role: 'user', content: 'What is the square root of 144?' }],
+   settings: {
+     reasoning: {
+       enabled: true,
+       maxTokens: 5000 // Specific token budget for reasoning
+     }
+   }
+ });
+
+ // Access reasoning output (if available)
+ if (response.object === 'chat.completion' && response.choices[0].reasoning) {
+   console.log('Model reasoning:', response.choices[0].reasoning);
+   console.log('Final answer:', response.choices[0].message.content);
+ }
+ ```
+
+ **Reasoning Options:**
+ - `enabled`: Turn reasoning on/off (some models like o4-mini and Gemini 2.5 Pro have it always on)
+ - `effort`: Quick presets - 'low' (20% budget), 'medium' (50%), 'high' (80%)
+ - `maxTokens`: Specific token budget for reasoning
+ - `exclude`: Set to `true` to enable reasoning but exclude it from the response
+
+ **Important Notes:**
+ - Reasoning tokens are billed separately and may cost more
+ - Some models (o4-mini, Gemini 2.5 Pro) cannot disable reasoning
+ - Not all models support reasoning - check the [supported models](#models-with-reasoning-support) list
+ - The `reasoning` field in the response contains the model's thought process (when available)
+
  ### Provider Information
 
  ```typescript
@@ -144,22 +216,27 @@ const presets = llmService.getPresets();
 
  ### Model Presets
 
- genai-lite includes a built-in set of model presets for common use cases. You can use these defaults, extend them with your own, or replace them entirely.
+ genai-lite includes a comprehensive set of model presets for common use cases. You can use these defaults, extend them with your own, or replace them entirely.
 
  #### Using Default Presets
 
+ The library ships with over 20 pre-configured presets (defined in `src/config/presets.json`), including specialized "thinking" presets for models with reasoning capabilities:
+
  ```typescript
  const llmService = new LLMService(fromEnvironment);
 
  // Get all default presets
  const presets = llmService.getPresets();
  // Returns presets like:
- // - anthropic-claude-3-5-sonnet-20241022-default
+ // - anthropic-claude-sonnet-4-20250514-default
+ // - anthropic-claude-sonnet-4-20250514-thinking (reasoning enabled)
  // - openai-gpt-4.1-default
- // - google-gemini-2.5-pro
- // ... and more
+ // - google-gemini-2.5-flash-thinking (reasoning enabled)
+ // ... and many more
  ```
 
+ The thinking presets automatically enable reasoning mode for supported models, making it easy to leverage advanced problem-solving capabilities without manual configuration.
+
  #### Extending Default Presets
 
  ```typescript
@@ -213,6 +290,69 @@ const llmService = new LLMService(fromEnvironment, {
  });
  ```
 
+ ### Using Presets with Messages
+
+ You can use presets directly in `sendMessage` calls:
+
+ ```typescript
+ // Send a message using a preset
+ const response = await llmService.sendMessage({
+   presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking',
+   messages: [{ role: 'user', content: 'Solve this complex problem...' }]
+ });
+
+ // Override preset settings
+ const response = await llmService.sendMessage({
+   presetId: 'openai-gpt-4.1-default',
+   messages: [{ role: 'user', content: 'Write a story' }],
+   settings: {
+     temperature: 0.9, // Override preset's temperature
+     maxTokens: 3000
+   }
+ });
+ ```
+
+ ### Model-Aware Template Rendering
+
+ The library provides a powerful `prepareMessage` method that renders templates with model context, allowing you to create adaptive prompts based on model capabilities:
+
+ ```typescript
+ // Prepare a message with model-aware template
+ const result = await llmService.prepareMessage({
+   template: `
+ {{ thinking_enabled ? "Please think step-by-step about this problem:" : "Please analyze this problem:" }}
+
+ {{ question }}
+
+ {{ thinking_available && !thinking_enabled ? "(Note: This model supports reasoning mode which could help with complex problems)" : "" }}
+ `,
+   variables: {
+     question: 'What is the optimal algorithm for finding the shortest path in a weighted graph?'
+   },
+   presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking'
+ });
+
+ if (result.object !== 'error') {
+   // Access the prepared messages and model context
+   console.log('Messages:', result.messages);
+   console.log('Model context:', result.modelContext);
+
+   // Send the prepared messages
+   const response = await llmService.sendMessage({
+     presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking',
+     messages: result.messages
+   });
+ }
+ ```
+
+ The model context includes:
+ - `thinking_enabled`: Whether reasoning/thinking is enabled for this request
+ - `thinking_available`: Whether the model supports reasoning/thinking
+ - `model_id`: The resolved model ID
+ - `provider_id`: The resolved provider ID
+ - `reasoning_effort`: The reasoning effort level if specified
+ - `reasoning_max_tokens`: The reasoning token budget if specified
+
  ### Error Handling
 
  ```typescript
@@ -284,26 +424,31 @@ genai-lite is written in TypeScript and provides comprehensive type definitions:
  ```typescript
  import type {
    LLMChatRequest,
+   LLMChatRequestWithPreset,
    LLMResponse,
    LLMFailureResponse,
    LLMSettings,
+   LLMReasoningSettings,
    ApiKeyProvider,
    ModelPreset,
    LLMServiceOptions,
-   PresetMode
+   PresetMode,
+   PrepareMessageOptions,
+   ModelContext,
+   PrepareMessageResult
  } from 'genai-lite';
  ```
 
  ## Utilities
 
- genai-lite includes useful utilities for working with LLMs, available through the `genai-lite/utils` subpath:
+ genai-lite includes useful utilities for working with LLMs, available through the `genai-lite/prompting` subpath:
 
  ### Token Counting
 
  Count the number of tokens in a string using OpenAI's tiktoken library:
 
  ```typescript
- import { countTokens } from 'genai-lite/utils';
+ import { countTokens } from 'genai-lite/prompting';
 
  const text = 'Hello, this is a sample text for token counting.';
  const tokenCount = countTokens(text); // Uses gpt-4 tokenizer by default
@@ -320,7 +465,7 @@ const gpt35Tokens = countTokens(text, 'gpt-3.5-turbo');
 
  Generate intelligent previews of large text blocks that preserve context:
 
  ```typescript
- import { getSmartPreview } from 'genai-lite/utils';
+ import { getSmartPreview } from 'genai-lite/prompting';
 
  const largeCodeFile = `
  function calculateTotal(items) {
@@ -359,7 +504,7 @@ Combine these utilities to build prompts that fit within model context windows:
 
  ```typescript
  import { LLMService, fromEnvironment } from 'genai-lite';
- import { countTokens, getSmartPreview } from 'genai-lite/utils';
+ import { countTokens, getSmartPreview } from 'genai-lite/prompting';
 
  const llm = new LLMService(fromEnvironment);
 
@@ -394,7 +539,7 @@ const response = await llm.sendMessage({
  Generate dynamic prompts and content using the built-in template engine that supports variable substitution and conditional logic:
 
  ```typescript
- import { renderTemplate } from 'genai-lite/utils';
+ import { renderTemplate } from 'genai-lite/prompting';
 
  // Simple variable substitution
  const greeting = renderTemplate('Hello, {{ name }}!', { name: 'World' });
@@ -432,7 +577,7 @@ const result = renderTemplate(complexTemplate, {
    expertise: 'TypeScript, React, Node.js',
    task: 'Review the code for best practices',
    hasFiles: true,
-   fileList: '- src/index.ts\n- src/utils.ts',
+   fileList: '- src/index.ts\n- src/prompting/template.ts',
    requiresOutput: false
  });
  ```
@@ -450,7 +595,7 @@ Combine the template engine with other utilities for powerful prompt generation:
 
  ```typescript
  import { LLMService, fromEnvironment } from 'genai-lite';
- import { renderTemplate, countTokens } from 'genai-lite/utils';
+ import { renderTemplate, countTokens } from 'genai-lite/prompting';
 
  const llm = new LLMService(fromEnvironment);
 
@@ -491,6 +636,138 @@ const response = await llm.sendMessage({
  });
  ```
 
+ ### Prompt Builder Utilities
+
+ genai-lite provides powerful utilities for building and parsing structured prompts:
+
+ #### Parsing Messages from Templates
+
+ Convert template strings with role tags into LLM message arrays:
+
+ ```typescript
+ import { buildMessagesFromTemplate } from 'genai-lite/prompting';
+
+ const template = `
+ <SYSTEM>You are a helpful assistant specialized in {{expertise}}.</SYSTEM>
+ <USER>Help me with {{task}}</USER>
+ <ASSISTANT>I'll help you with {{task}}. Let me analyze the requirements...</ASSISTANT>
+ <USER>Can you provide more details?</USER>
+ `;
+
+ const messages = buildMessagesFromTemplate(template, {
+   expertise: 'TypeScript and React',
+   task: 'building a custom hook'
+ });
+
+ // Result: Array of LLMMessage objects ready for the API
+ // [
+ //   { role: 'system', content: 'You are a helpful assistant specialized in TypeScript and React.' },
+ //   { role: 'user', content: 'Help me with building a custom hook' },
+ //   { role: 'assistant', content: "I'll help you with building a custom hook. Let me analyze..." },
+ //   { role: 'user', content: 'Can you provide more details?' }
+ // ]
+ ```
+
+ #### Extracting Random Variables for Few-Shot Learning
+
+ Implement few-shot prompting by extracting and shuffling examples:
+
+ ```typescript
+ import { extractRandomVariables, renderTemplate } from 'genai-lite/prompting';
+
+ // Define examples in your template
+ const examplesTemplate = `
+ <RANDOM_INPUT>User: Translate "hello" to Spanish</RANDOM_INPUT>
+ <RANDOM_OUTPUT>Assistant: The translation of "hello" to Spanish is "hola".</RANDOM_OUTPUT>
+
+ <RANDOM_INPUT>User: Translate "goodbye" to French</RANDOM_INPUT>
+ <RANDOM_OUTPUT>Assistant: The translation of "goodbye" to French is "au revoir".</RANDOM_OUTPUT>
+
+ <RANDOM_INPUT>User: Translate "thank you" to German</RANDOM_INPUT>
+ <RANDOM_OUTPUT>Assistant: The translation of "thank you" to German is "danke".</RANDOM_OUTPUT>
+ `;
+
+ // Extract random variables (shuffled each time)
+ const variables = extractRandomVariables(examplesTemplate, { maxPerTag: 2 });
+
+ // Use in a prompt template
+ const promptTemplate = `
+ You are a translation assistant. Here are some examples:
+
+ {{ random_input_1 }}
+ {{ random_output_1 }}
+
+ {{ random_input_2 }}
+ {{ random_output_2 }}
+
+ Now translate: "{{word}}" to {{language}}
+ `;
+
+ const prompt = renderTemplate(promptTemplate, {
+   ...variables,
+   word: 'please',
+   language: 'Italian'
+ });
+ ```
+
+ #### Parsing Structured LLM Responses
+
+ Extract structured data from LLM responses using custom tags:
+
+ ```typescript
+ import { parseStructuredContent } from 'genai-lite/prompting';
+
+ // Example LLM response with structured output
+ const llmResponse = `
+ Let me analyze this code for you.
+
+ <ANALYSIS>
+ The code has good structure but could benefit from:
+ 1. Better error handling in the API calls
+ 2. Memoization for expensive computations
+ 3. More descriptive variable names
+ </ANALYSIS>
+
+ <SUGGESTIONS>
+ - Add try-catch blocks around async operations
+ - Use React.memo() for the expensive component
+ - Rename 'data' to 'userData' for clarity
+ </SUGGESTIONS>
+
+ <REFACTORED_CODE>
+ const UserProfile = React.memo(({ userId }) => {
+   const [userData, setUserData] = useState(null);
+
+   useEffect(() => {
+     fetchUserData(userId)
+       .then(setUserData)
+       .catch(error => console.error('Failed to load user:', error));
+   }, [userId]);
+
+   return userData ? <Profile data={userData} /> : <Loading />;
+ });
+ </REFACTORED_CODE>
+ `;
+
+ // Parse the structured content
+ const parsed = parseStructuredContent(llmResponse, [
+   'ANALYSIS',
+   'SUGGESTIONS',
+   'REFACTORED_CODE'
+ ]);
+
+ console.log(parsed.ANALYSIS); // The analysis text
+ console.log(parsed.SUGGESTIONS); // The suggestions text
+ console.log(parsed.REFACTORED_CODE); // The refactored code
+ ```
+
+ These prompt builder utilities enable:
+ - **Structured Conversations**: Build multi-turn conversations from templates
+ - **Few-Shot Learning**: Randomly sample examples to improve AI responses
+ - **Reliable Output Parsing**: Extract specific sections from AI responses
+ - **Template Reusability**: Define templates once, use with different variables
+ - **Type Safety**: Full TypeScript support with LLMMessage types
+
  ## Contributing
 
  Contributions are welcome! Please feel free to submit a Pull Request. For major changes, please open an issue first to discuss what you would like to change.
package/dist/config/presets.json CHANGED
@@ -6,7 +6,23 @@
      "providerId": "anthropic",
      "modelId": "claude-sonnet-4-20250514",
      "settings": {
-       "temperature": 0.3
+       "temperature": 0.7,
+       "reasoning": {
+         "enabled": false
+       }
+     }
+   },
+   {
+     "id": "anthropic-claude-sonnet-4-20250514-thinking",
+     "displayName": "Anthropic - Claude Sonnet 4 (Thinking)",
+     "description": "Claude Sonnet 4 with reasoning enabled for step-by-step thinking.",
+     "providerId": "anthropic",
+     "modelId": "claude-sonnet-4-20250514",
+     "settings": {
+       "temperature": 0.7,
+       "reasoning": {
+         "enabled": true
+       }
      }
    },
    {
@@ -16,7 +32,23 @@
      "providerId": "anthropic",
      "modelId": "claude-opus-4-20250514",
      "settings": {
-       "temperature": 0.3
+       "temperature": 0.7,
+       "reasoning": {
+         "enabled": false
+       }
+     }
+   },
+   {
+     "id": "anthropic-claude-opus-4-20250514-thinking",
+     "displayName": "Anthropic - Claude Opus 4 (Thinking)",
+     "description": "Claude Opus 4 with reasoning enabled for complex problem solving.",
+     "providerId": "anthropic",
+     "modelId": "claude-opus-4-20250514",
+     "settings": {
+       "temperature": 0.7,
+       "reasoning": {
+         "enabled": true
+       }
      }
    },
    {
@@ -26,7 +58,23 @@
      "providerId": "anthropic",
      "modelId": "claude-3-7-sonnet-20250219",
      "settings": {
-       "temperature": 0.3
+       "temperature": 0.7,
+       "reasoning": {
+         "enabled": false
+       }
+     }
+   },
+   {
+     "id": "anthropic-claude-3-7-sonnet-20250219-thinking",
+     "displayName": "Anthropic - Claude 3.7 Sonnet (Thinking)",
+     "description": "Claude 3.7 Sonnet with full reasoning output for detailed analysis.",
+     "providerId": "anthropic",
+     "modelId": "claude-3-7-sonnet-20250219",
+     "settings": {
+       "temperature": 0.7,
+       "reasoning": {
+         "enabled": true
+       }
      }
    },
    {
@@ -36,7 +84,7 @@
      "providerId": "anthropic",
      "modelId": "claude-3-5-sonnet-20241022",
      "settings": {
-       "temperature": 0.3
+       "temperature": 0.7
      }
    },
    {
@@ -46,7 +94,7 @@
      "providerId": "anthropic",
      "modelId": "claude-3-5-haiku-20241022",
      "settings": {
-       "temperature": 0.3
+       "temperature": 0.7
      }
    },
    {
@@ -56,7 +104,7 @@
      "providerId": "gemini",
      "modelId": "gemini-2.5-pro",
      "settings": {
-       "temperature": 0.3,
+       "temperature": 0.7,
        "geminiSafetySettings": [
          { "category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE" },
          {
@@ -78,7 +126,7 @@
      "providerId": "gemini",
      "modelId": "gemini-2.5-flash",
      "settings": {
-       "temperature": 0.3,
+       "temperature": 0.7,
        "geminiSafetySettings": [
          { "category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE" },
          {
@@ -90,7 +138,35 @@
            "threshold": "BLOCK_NONE"
          },
          { "category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE" }
-       ]
+       ],
+       "reasoning": {
+         "enabled": false
+       }
+     }
+   },
+   {
+     "id": "google-gemini-2.5-flash-thinking",
+     "displayName": "Google - Gemini 2.5 Flash (Thinking)",
+     "description": "Gemini 2.5 Flash with dynamic reasoning for adaptive problem solving.",
+     "providerId": "gemini",
+     "modelId": "gemini-2.5-flash",
+     "settings": {
+       "temperature": 0.7,
+       "geminiSafetySettings": [
+         { "category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE" },
+         {
+           "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+           "threshold": "BLOCK_NONE"
+         },
+         {
+           "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+           "threshold": "BLOCK_NONE"
+         },
+         { "category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE" }
+       ],
+       "reasoning": {
+         "enabled": true
+       }
      }
    },
    {
@@ -100,7 +176,7 @@
      "providerId": "gemini",
      "modelId": "gemini-2.5-flash-lite-preview-06-17",
      "settings": {
-       "temperature": 0.3,
+       "temperature": 0.7,
        "geminiSafetySettings": [
          { "category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE" },
          {
@@ -112,7 +188,35 @@
            "threshold": "BLOCK_NONE"
          },
          { "category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE" }
-       ]
+       ],
+       "reasoning": {
+         "enabled": false
+       }
+     }
+   },
+   {
+     "id": "google-gemini-2.5-flash-lite-preview-thinking",
+     "displayName": "Google - Gemini 2.5 Flash-Lite Preview (Thinking)",
+     "description": "Gemini 2.5 Flash-Lite with dynamic reasoning for efficient thinking.",
+     "providerId": "gemini",
+     "modelId": "gemini-2.5-flash-lite-preview-06-17",
+     "settings": {
+       "temperature": 0.7,
+       "geminiSafetySettings": [
+         { "category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE" },
+         {
+           "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+           "threshold": "BLOCK_NONE"
+         },
+         {
+           "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+           "threshold": "BLOCK_NONE"
+         },
+         { "category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE" }
+       ],
+       "reasoning": {
+         "enabled": true
+       }
      }
    },
    {
@@ -122,7 +226,7 @@
      "providerId": "gemini",
      "modelId": "gemini-2.0-flash",
      "settings": {
-       "temperature": 0.3,
+       "temperature": 0.7,
        "geminiSafetySettings": [
          { "category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE" },
          {
@@ -144,7 +248,7 @@
      "providerId": "gemini",
      "modelId": "gemini-2.0-flash-lite",
      "settings": {
-       "temperature": 0.3,
+       "temperature": 0.7,
        "geminiSafetySettings": [
          { "category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE" },
          {
@@ -176,7 +280,7 @@
      "providerId": "openai",
      "modelId": "gpt-4.1",
      "settings": {
-       "temperature": 0.3
+       "temperature": 0.7
      }
    },
    {
@@ -186,7 +290,7 @@
      "providerId": "openai",
      "modelId": "gpt-4.1-mini",
      "settings": {
-       "temperature": 0.3
+       "temperature": 0.7
      }
    },
    {
@@ -196,7 +300,7 @@
      "providerId": "openai",
      "modelId": "gpt-4.1-nano",
      "settings": {
-       "temperature": 0.3
+       "temperature": 0.7
      }
    },
    {
@@ -206,7 +310,7 @@
      "providerId": "mistral",
      "modelId": "codestral-2501",
      "settings": {
-       "temperature": 0.3
+       "temperature": 0.7
      }
    },
    {
@@ -216,7 +320,7 @@
      "providerId": "mistral",
      "modelId": "devstral-small-2505",
      "settings": {
-       "temperature": 0.3
+       "temperature": 0.7
      }
    }
  ]
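
The net effect of the preset changes above: every default preset moves from temperature 0.3 to 0.7, and each reasoning-capable model gains a paired `-thinking` preset that differs only in `reasoning.enabled`. A minimal usage sketch, using two preset ids that appear verbatim in this diff, and assuming `sendMessage` resolves `presetId` as documented in the README section above:

```typescript
import { LLMService, fromEnvironment } from 'genai-lite';

const llm = new LLMService(fromEnvironment);

// Same model, reasoning off (temperature 0.7, reasoning.enabled: false)
const plain = await llm.sendMessage({
  presetId: 'anthropic-claude-sonnet-4-20250514-default',
  messages: [{ role: 'user', content: 'Summarize this design doc.' }],
});

// Same model, reasoning on (reasoning.enabled: true)
const reasoned = await llm.sendMessage({
  presetId: 'anthropic-claude-sonnet-4-20250514-thinking',
  messages: [{ role: 'user', content: 'Find the flaw in this proof.' }],
});
```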
package/dist/index.d.ts CHANGED
@@ -5,4 +5,7 @@ export type { ModelPreset } from "./types/presets";
  export * from "./llm/types";
  export * from "./llm/clients/types";
  export { fromEnvironment } from "./providers/fromEnvironment";
- export { renderTemplate } from "./utils/templateEngine";
+ export { renderTemplate } from "./prompting/template";
+ export { countTokens, getSmartPreview, extractRandomVariables } from "./prompting/content";
+ export { buildMessagesFromTemplate } from "./prompting/builder";
+ export { parseStructuredContent } from "./prompting/parser";
package/dist/index.js CHANGED
@@ -14,7 +14,7 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
      for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
  };
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.renderTemplate = exports.fromEnvironment = exports.LLMService = void 0;
+ exports.parseStructuredContent = exports.buildMessagesFromTemplate = exports.extractRandomVariables = exports.getSmartPreview = exports.countTokens = exports.renderTemplate = exports.fromEnvironment = exports.LLMService = void 0;
  // --- LLM Service ---
  var LLMService_1 = require("./llm/LLMService");
  Object.defineProperty(exports, "LLMService", { enumerable: true, get: function () { return LLMService_1.LLMService; } });
@@ -26,5 +26,13 @@ __exportStar(require("./llm/clients/types"), exports);
  var fromEnvironment_1 = require("./providers/fromEnvironment");
  Object.defineProperty(exports, "fromEnvironment", { enumerable: true, get: function () { return fromEnvironment_1.fromEnvironment; } });
  // --- Utilities ---
- var templateEngine_1 = require("./utils/templateEngine");
- Object.defineProperty(exports, "renderTemplate", { enumerable: true, get: function () { return templateEngine_1.renderTemplate; } });
+ var template_1 = require("./prompting/template");
+ Object.defineProperty(exports, "renderTemplate", { enumerable: true, get: function () { return template_1.renderTemplate; } });
+ var content_1 = require("./prompting/content");
+ Object.defineProperty(exports, "countTokens", { enumerable: true, get: function () { return content_1.countTokens; } });
+ Object.defineProperty(exports, "getSmartPreview", { enumerable: true, get: function () { return content_1.getSmartPreview; } });
+ Object.defineProperty(exports, "extractRandomVariables", { enumerable: true, get: function () { return content_1.extractRandomVariables; } });
+ var builder_1 = require("./prompting/builder");
+ Object.defineProperty(exports, "buildMessagesFromTemplate", { enumerable: true, get: function () { return builder_1.buildMessagesFromTemplate; } });
+ var parser_1 = require("./prompting/parser");
+ Object.defineProperty(exports, "parseStructuredContent", { enumerable: true, get: function () { return parser_1.parseStructuredContent; } });
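
As the re-export list above shows, 0.2.1 also surfaces the prompting utilities from the package root, so the subpath import is a convenience rather than a requirement. A small sketch: the equivalence of the two paths follows from the re-exports in `dist/index.js`, while the exact output string is inferred from the README's `renderTemplate` example.

```typescript
// Both imports resolve to the same implementation in 0.2.1:
import { renderTemplate } from 'genai-lite';                 // via dist/index.js re-export
import { renderTemplate as rt } from 'genai-lite/prompting'; // documented subpath

console.log(renderTemplate('Hello, {{ name }}!', { name: 'World' })); // Hello, World!
console.log(rt('Hello, {{ name }}!', { name: 'World' }));             // Hello, World!
```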