genai-lite 0.1.4 → 0.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/README.md +289 -12
  2. package/dist/config/presets.json +121 -17
  3. package/dist/index.d.ts +4 -1
  4. package/dist/index.js +11 -3
  5. package/dist/llm/LLMService.d.ts +39 -2
  6. package/dist/llm/LLMService.js +291 -78
  7. package/dist/llm/LLMService.prepareMessage.test.js +303 -0
  8. package/dist/llm/LLMService.sendMessage.preset.test.js +153 -0
  9. package/dist/llm/LLMService.test.js +83 -0
  10. package/dist/llm/clients/AnthropicClientAdapter.js +64 -10
  11. package/dist/llm/clients/AnthropicClientAdapter.test.js +7 -1
  12. package/dist/llm/clients/GeminiClientAdapter.js +70 -11
  13. package/dist/llm/clients/GeminiClientAdapter.test.js +121 -1
  14. package/dist/llm/clients/MockClientAdapter.test.js +7 -1
  15. package/dist/llm/clients/OpenAIClientAdapter.js +26 -10
  16. package/dist/llm/clients/OpenAIClientAdapter.test.js +7 -1
  17. package/dist/llm/config.js +112 -2
  18. package/dist/llm/config.test.js +17 -0
  19. package/dist/llm/types.d.ts +106 -0
  20. package/dist/prompting/builder.d.ts +34 -0
  21. package/dist/prompting/builder.js +112 -0
  22. package/dist/prompting/builder.test.d.ts +4 -0
  23. package/dist/prompting/builder.test.js +109 -0
  24. package/dist/prompting/content.d.ts +57 -0
  25. package/dist/prompting/content.js +146 -0
  26. package/dist/prompting/content.test.d.ts +4 -0
  27. package/dist/prompting/content.test.js +212 -0
  28. package/dist/prompting/index.d.ts +13 -0
  29. package/dist/prompting/index.js +26 -0
  30. package/dist/prompting/parser.d.ts +41 -0
  31. package/dist/prompting/parser.js +56 -0
  32. package/dist/prompting/parser.test.d.ts +4 -0
  33. package/dist/prompting/parser.test.js +116 -0
  34. package/dist/{utils/templateEngine.d.ts → prompting/template.d.ts} +7 -0
  35. package/dist/{utils/templateEngine.js → prompting/template.js} +7 -0
  36. package/dist/prompting/template.test.d.ts +1 -0
  37. package/dist/{utils/templateEngine.test.js → prompting/template.test.js} +30 -30
  38. package/package.json +7 -6
  39. package/src/config/presets.json +122 -17
  40. package/dist/utils/index.d.ts +0 -2
  41. package/dist/utils/index.js +0 -18
  42. package/dist/utils/prompt.d.ts +0 -6
  43. package/dist/utils/prompt.js +0 -55
  44. package/dist/utils/prompt.test.js +0 -115
  45. package/dist/{utils/prompt.test.d.ts → llm/LLMService.prepareMessage.test.d.ts} +0 -0
  46. package/dist/{utils/templateEngine.test.d.ts → llm/LLMService.sendMessage.preset.test.d.ts} +0 -0
@@ -6,7 +6,23 @@
6
6
  "providerId": "anthropic",
7
7
  "modelId": "claude-sonnet-4-20250514",
8
8
  "settings": {
9
- "temperature": 0.3
9
+ "temperature": 0.7,
10
+ "reasoning": {
11
+ "enabled": false
12
+ }
13
+ }
14
+ },
15
+ {
16
+ "id": "anthropic-claude-sonnet-4-20250514-thinking",
17
+ "displayName": "Anthropic - Claude Sonnet 4 (Thinking)",
18
+ "description": "Claude Sonnet 4 with reasoning enabled for step-by-step thinking.",
19
+ "providerId": "anthropic",
20
+ "modelId": "claude-sonnet-4-20250514",
21
+ "settings": {
22
+ "temperature": 0.7,
23
+ "reasoning": {
24
+ "enabled": true
25
+ }
10
26
  }
11
27
  },
12
28
  {
@@ -16,7 +32,24 @@
16
32
  "providerId": "anthropic",
17
33
  "modelId": "claude-opus-4-20250514",
18
34
  "settings": {
19
- "temperature": 0.3
35
+ "temperature": 0.7,
36
+ "reasoning": {
37
+ "enabled": false
38
+ }
39
+
40
+ }
41
+ },
42
+ {
43
+ "id": "anthropic-claude-opus-4-20250514-thinking",
44
+ "displayName": "Anthropic - Claude Opus 4 (Thinking)",
45
+ "description": "Claude Opus 4 with reasoning enabled for complex problem solving.",
46
+ "providerId": "anthropic",
47
+ "modelId": "claude-opus-4-20250514",
48
+ "settings": {
49
+ "temperature": 0.7,
50
+ "reasoning": {
51
+ "enabled": true
52
+ }
20
53
  }
21
54
  },
22
55
  {
@@ -26,7 +59,23 @@
26
59
  "providerId": "anthropic",
27
60
  "modelId": "claude-3-7-sonnet-20250219",
28
61
  "settings": {
29
- "temperature": 0.3
62
+ "temperature": 0.7,
63
+ "reasoning": {
64
+ "enabled": false
65
+ }
66
+ }
67
+ },
68
+ {
69
+ "id": "anthropic-claude-3-7-sonnet-20250219-thinking",
70
+ "displayName": "Anthropic - Claude 3.7 Sonnet (Thinking)",
71
+ "description": "Claude 3.7 Sonnet with full reasoning output for detailed analysis.",
72
+ "providerId": "anthropic",
73
+ "modelId": "claude-3-7-sonnet-20250219",
74
+ "settings": {
75
+ "temperature": 0.7,
76
+ "reasoning": {
77
+ "enabled": true
78
+ }
30
79
  }
31
80
  },
32
81
  {
@@ -36,7 +85,7 @@
36
85
  "providerId": "anthropic",
37
86
  "modelId": "claude-3-5-sonnet-20241022",
38
87
  "settings": {
39
- "temperature": 0.3
88
+ "temperature": 0.7
40
89
  }
41
90
  },
42
91
  {
@@ -46,7 +95,7 @@
46
95
  "providerId": "anthropic",
47
96
  "modelId": "claude-3-5-haiku-20241022",
48
97
  "settings": {
49
- "temperature": 0.3
98
+ "temperature": 0.7
50
99
  }
51
100
  },
52
101
  {
@@ -56,7 +105,7 @@
56
105
  "providerId": "gemini",
57
106
  "modelId": "gemini-2.5-pro",
58
107
  "settings": {
59
- "temperature": 0.3,
108
+ "temperature": 0.7,
60
109
  "geminiSafetySettings": [
61
110
  { "category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE" },
62
111
  {
@@ -78,7 +127,7 @@
78
127
  "providerId": "gemini",
79
128
  "modelId": "gemini-2.5-flash",
80
129
  "settings": {
81
- "temperature": 0.3,
130
+ "temperature": 0.7,
82
131
  "geminiSafetySettings": [
83
132
  { "category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE" },
84
133
  {
@@ -90,7 +139,35 @@
90
139
  "threshold": "BLOCK_NONE"
91
140
  },
92
141
  { "category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE" }
93
- ]
142
+ ],
143
+ "reasoning": {
144
+ "enabled": false
145
+ }
146
+ }
147
+ },
148
+ {
149
+ "id": "google-gemini-2.5-flash-thinking",
150
+ "displayName": "Google - Gemini 2.5 Flash (Thinking)",
151
+ "description": "Gemini 2.5 Flash with dynamic reasoning for adaptive problem solving.",
152
+ "providerId": "gemini",
153
+ "modelId": "gemini-2.5-flash",
154
+ "settings": {
155
+ "temperature": 0.7,
156
+ "geminiSafetySettings": [
157
+ { "category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE" },
158
+ {
159
+ "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
160
+ "threshold": "BLOCK_NONE"
161
+ },
162
+ {
163
+ "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
164
+ "threshold": "BLOCK_NONE"
165
+ },
166
+ { "category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE" }
167
+ ],
168
+ "reasoning": {
169
+ "enabled": true
170
+ }
94
171
  }
95
172
  },
96
173
  {
@@ -100,7 +177,7 @@
100
177
  "providerId": "gemini",
101
178
  "modelId": "gemini-2.5-flash-lite-preview-06-17",
102
179
  "settings": {
103
- "temperature": 0.3,
180
+ "temperature": 0.7,
104
181
  "geminiSafetySettings": [
105
182
  { "category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE" },
106
183
  {
@@ -112,7 +189,35 @@
112
189
  "threshold": "BLOCK_NONE"
113
190
  },
114
191
  { "category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE" }
115
- ]
192
+ ],
193
+ "reasoning": {
194
+ "enabled": false
195
+ }
196
+ }
197
+ },
198
+ {
199
+ "id": "google-gemini-2.5-flash-lite-preview-thinking",
200
+ "displayName": "Google - Gemini 2.5 Flash-Lite Preview (Thinking)",
201
+ "description": "Gemini 2.5 Flash-Lite with dynamic reasoning for efficient thinking.",
202
+ "providerId": "gemini",
203
+ "modelId": "gemini-2.5-flash-lite-preview-06-17",
204
+ "settings": {
205
+ "temperature": 0.7,
206
+ "geminiSafetySettings": [
207
+ { "category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE" },
208
+ {
209
+ "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
210
+ "threshold": "BLOCK_NONE"
211
+ },
212
+ {
213
+ "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
214
+ "threshold": "BLOCK_NONE"
215
+ },
216
+ { "category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE" }
217
+ ],
218
+ "reasoning": {
219
+ "enabled": true
220
+ }
116
221
  }
117
222
  },
118
223
  {
@@ -122,7 +227,7 @@
122
227
  "providerId": "gemini",
123
228
  "modelId": "gemini-2.0-flash",
124
229
  "settings": {
125
- "temperature": 0.3,
230
+ "temperature": 0.7,
126
231
  "geminiSafetySettings": [
127
232
  { "category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE" },
128
233
  {
@@ -144,7 +249,7 @@
144
249
  "providerId": "gemini",
145
250
  "modelId": "gemini-2.0-flash-lite",
146
251
  "settings": {
147
- "temperature": 0.3,
252
+ "temperature": 0.7,
148
253
  "geminiSafetySettings": [
149
254
  { "category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE" },
150
255
  {
@@ -176,7 +281,7 @@
176
281
  "providerId": "openai",
177
282
  "modelId": "gpt-4.1",
178
283
  "settings": {
179
- "temperature": 0.3
284
+ "temperature": 0.7
180
285
  }
181
286
  },
182
287
  {
@@ -186,7 +291,7 @@
186
291
  "providerId": "openai",
187
292
  "modelId": "gpt-4.1-mini",
188
293
  "settings": {
189
- "temperature": 0.3
294
+ "temperature": 0.7
190
295
  }
191
296
  },
192
297
  {
@@ -196,7 +301,7 @@
196
301
  "providerId": "openai",
197
302
  "modelId": "gpt-4.1-nano",
198
303
  "settings": {
199
- "temperature": 0.3
304
+ "temperature": 0.7
200
305
  }
201
306
  },
202
307
  {
@@ -206,7 +311,7 @@
206
311
  "providerId": "mistral",
207
312
  "modelId": "codestral-2501",
208
313
  "settings": {
209
- "temperature": 0.3
314
+ "temperature": 0.7
210
315
  }
211
316
  },
212
317
  {
@@ -216,7 +321,7 @@
216
321
  "providerId": "mistral",
217
322
  "modelId": "devstral-small-2505",
218
323
  "settings": {
219
- "temperature": 0.3
324
+ "temperature": 0.7
220
325
  }
221
326
  }
222
327
  ]
@@ -1,2 +0,0 @@
1
- export * from './prompt';
2
- export * from './templateEngine';
@@ -1,18 +0,0 @@
1
- "use strict";
2
- var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
3
- if (k2 === undefined) k2 = k;
4
- var desc = Object.getOwnPropertyDescriptor(m, k);
5
- if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
6
- desc = { enumerable: true, get: function() { return m[k]; } };
7
- }
8
- Object.defineProperty(o, k2, desc);
9
- }) : (function(o, m, k, k2) {
10
- if (k2 === undefined) k2 = k;
11
- o[k2] = m[k];
12
- }));
13
- var __exportStar = (this && this.__exportStar) || function(m, exports) {
14
- for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
15
- };
16
- Object.defineProperty(exports, "__esModule", { value: true });
17
- __exportStar(require("./prompt"), exports);
18
- __exportStar(require("./templateEngine"), exports);
@@ -1,6 +0,0 @@
1
- import { TiktokenModel } from 'js-tiktoken';
2
- export declare function countTokens(text: string, model?: TiktokenModel): number;
3
- export declare function getSmartPreview(content: string, config: {
4
- minLines: number;
5
- maxLines: number;
6
- }): string;
@@ -1,55 +0,0 @@
1
- "use strict";
2
- Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.countTokens = countTokens;
4
- exports.getSmartPreview = getSmartPreview;
5
- const js_tiktoken_1 = require("js-tiktoken");
6
- const tokenizerCache = new Map();
7
- function getTokenizer(model) {
8
- if (tokenizerCache.has(model)) {
9
- return tokenizerCache.get(model);
10
- }
11
- try {
12
- const tokenizer = (0, js_tiktoken_1.encodingForModel)(model);
13
- tokenizerCache.set(model, tokenizer);
14
- return tokenizer;
15
- }
16
- catch (error) {
17
- console.error(`Failed to initialize tokenizer for model ${model}:`, error);
18
- throw error;
19
- }
20
- }
21
- function countTokens(text, model = 'gpt-4') {
22
- if (!text)
23
- return 0;
24
- try {
25
- const tokenizer = getTokenizer(model);
26
- return tokenizer.encode(text).length;
27
- }
28
- catch (error) {
29
- // Fallback to a rough estimate if tokenizer fails for any reason
30
- return Math.ceil(text.length / 4);
31
- }
32
- }
33
- function getSmartPreview(content, config) {
34
- const lines = content.split('\n');
35
- // If the file is not longer than maxLines, return it in full
36
- if (lines.length <= config.maxLines) {
37
- return content;
38
- }
39
- // Always show at least minLines
40
- let endLine = config.minLines;
41
- let emptyLinesCount = lines
42
- .slice(0, config.minLines)
43
- .filter((line) => line.trim() === '').length;
44
- // If we haven't found at least two empty lines, keep looking up to maxLines
45
- if (emptyLinesCount < 2 && lines.length > config.minLines) {
46
- for (let i = config.minLines; i < Math.min(lines.length, config.maxLines); i++) {
47
- if (lines[i].trim() === '') {
48
- endLine = i + 1; // Include the empty line
49
- break;
50
- }
51
- endLine = i + 1;
52
- }
53
- }
54
- return lines.slice(0, endLine).join('\n') + '\n... (content truncated)';
55
- }
@@ -1,115 +0,0 @@
1
- "use strict";
2
- Object.defineProperty(exports, "__esModule", { value: true });
3
- const prompt_1 = require("./prompt");
4
- describe('Prompt Utilities', () => {
5
- describe('countTokens', () => {
6
- it('should return 0 for empty string', () => {
7
- expect((0, prompt_1.countTokens)('')).toBe(0);
8
- });
9
- it('should count tokens for simple text', () => {
10
- const text = 'Hello, world!';
11
- const count = (0, prompt_1.countTokens)(text);
12
- expect(count).toBeGreaterThan(0);
13
- expect(count).toBeLessThan(text.length); // Tokens are typically fewer than characters
14
- });
15
- it('should count tokens with default gpt-4 model', () => {
16
- const text = 'The quick brown fox jumps over the lazy dog';
17
- const count = (0, prompt_1.countTokens)(text);
18
- expect(count).toBeGreaterThan(0);
19
- });
20
- it('should count tokens with different models', () => {
21
- const text = 'Testing different models';
22
- const gpt4Count = (0, prompt_1.countTokens)(text, 'gpt-4');
23
- const gpt35Count = (0, prompt_1.countTokens)(text, 'gpt-3.5-turbo');
24
- expect(gpt4Count).toBeGreaterThan(0);
25
- expect(gpt35Count).toBeGreaterThan(0);
26
- });
27
- it('should handle special characters and emojis', () => {
28
- const text = '🚀 Special chars: @#$% and \n\t newlines';
29
- const count = (0, prompt_1.countTokens)(text);
30
- expect(count).toBeGreaterThan(0);
31
- });
32
- it('should fallback to estimate for invalid model', () => {
33
- const text = 'Test fallback behavior';
34
- const count = (0, prompt_1.countTokens)(text, 'invalid-model');
35
- // Should fallback to length/4 estimate
36
- expect(count).toBe(Math.ceil(text.length / 4));
37
- });
38
- it('should handle very long text', () => {
39
- const longText = 'a'.repeat(10000);
40
- const count = (0, prompt_1.countTokens)(longText);
41
- expect(count).toBeGreaterThan(0);
42
- expect(count).toBeLessThan(longText.length);
43
- });
44
- });
45
- describe('getSmartPreview', () => {
46
- const config = { minLines: 5, maxLines: 10 };
47
- it('should return full content if shorter than maxLines', () => {
48
- const content = 'Line 1\nLine 2\nLine 3';
49
- const preview = (0, prompt_1.getSmartPreview)(content, config);
50
- expect(preview).toBe(content);
51
- });
52
- it('should truncate at maxLines if no empty lines found', () => {
53
- const lines = Array.from({ length: 20 }, (_, i) => `Line ${i + 1}`);
54
- const content = lines.join('\n');
55
- const preview = (0, prompt_1.getSmartPreview)(content, config);
56
- const previewLines = preview.split('\n');
57
- // Should extend up to maxLines when no empty lines are found
58
- expect(previewLines.length).toBe(config.maxLines + 1); // +1 for truncation message
59
- expect(preview).toContain('... (content truncated)');
60
- });
61
- it('should extend to next empty line within maxLines', () => {
62
- const content = `Line 1
63
- Line 2
64
- Line 3
65
- Line 4
66
- Line 5
67
- Line 6
68
-
69
- Line 8
70
- Line 9
71
- Line 10
72
- Line 11`;
73
- const preview = (0, prompt_1.getSmartPreview)(content, config);
74
- const previewLines = preview.split('\n');
75
- // Should include up to line 7 (the empty line)
76
- expect(previewLines[6]).toBe('');
77
- expect(preview).toContain('... (content truncated)');
78
- });
79
- it('should handle content exactly at maxLines', () => {
80
- const lines = Array.from({ length: config.maxLines }, (_, i) => `Line ${i + 1}`);
81
- const content = lines.join('\n');
82
- const preview = (0, prompt_1.getSmartPreview)(content, config);
83
- expect(preview).toBe(content);
84
- });
85
- it('should handle empty content', () => {
86
- const preview = (0, prompt_1.getSmartPreview)('', config);
87
- expect(preview).toBe('');
88
- });
89
- it('should handle content with multiple consecutive empty lines', () => {
90
- const content = `Line 1
91
- Line 2
92
-
93
-
94
- Line 5
95
- Line 6
96
-
97
- Line 8
98
- Line 9
99
- Line 10
100
- Line 11`;
101
- const preview = (0, prompt_1.getSmartPreview)(content, config);
102
- const previewLines = preview.split('\n');
103
- // Should stop at first empty line after minLines
104
- expect(previewLines.length).toBeLessThanOrEqual(config.maxLines + 1);
105
- expect(preview).toContain('... (content truncated)');
106
- });
107
- it('should respect maxLines limit even with empty lines', () => {
108
- const lines = Array.from({ length: 15 }, (_, i) => i % 3 === 0 ? '' : `Line ${i + 1}`);
109
- const content = lines.join('\n');
110
- const preview = (0, prompt_1.getSmartPreview)(content, config);
111
- const previewLines = preview.split('\n');
112
- expect(previewLines.length).toBeLessThanOrEqual(config.maxLines + 1);
113
- });
114
- });
115
- });