genai-lite 0.1.0 → 0.1.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. package/README.md +194 -1
  2. package/dist/config/presets.json +222 -0
  3. package/dist/index.d.ts +2 -0
  4. package/dist/llm/LLMService.d.ts +25 -1
  5. package/dist/llm/LLMService.js +34 -1
  6. package/dist/llm/LLMService.presets.test.d.ts +1 -0
  7. package/dist/llm/LLMService.presets.test.js +210 -0
  8. package/dist/llm/LLMService.test.d.ts +1 -0
  9. package/dist/llm/LLMService.test.js +279 -0
  10. package/dist/llm/clients/AnthropicClientAdapter.test.d.ts +1 -0
  11. package/dist/llm/clients/AnthropicClientAdapter.test.js +263 -0
  12. package/dist/llm/clients/GeminiClientAdapter.test.d.ts +1 -0
  13. package/dist/llm/clients/GeminiClientAdapter.test.js +281 -0
  14. package/dist/llm/clients/MockClientAdapter.test.d.ts +1 -0
  15. package/dist/llm/clients/MockClientAdapter.test.js +240 -0
  16. package/dist/llm/clients/OpenAIClientAdapter.test.d.ts +1 -0
  17. package/dist/llm/clients/OpenAIClientAdapter.test.js +248 -0
  18. package/dist/llm/clients/adapterErrorUtils.test.d.ts +1 -0
  19. package/dist/llm/clients/adapterErrorUtils.test.js +123 -0
  20. package/dist/llm/config.test.d.ts +1 -0
  21. package/dist/llm/config.test.js +159 -0
  22. package/dist/providers/fromEnvironment.test.d.ts +1 -0
  23. package/dist/providers/fromEnvironment.test.js +46 -0
  24. package/dist/types/presets.d.ts +19 -0
  25. package/dist/types/presets.js +2 -0
  26. package/dist/utils/index.d.ts +1 -0
  27. package/dist/utils/index.js +17 -0
  28. package/dist/utils/prompt.d.ts +6 -0
  29. package/dist/utils/prompt.js +55 -0
  30. package/dist/utils/prompt.test.d.ts +1 -0
  31. package/dist/utils/prompt.test.js +115 -0
  32. package/package.json +22 -4
  33. package/src/config/presets.json +222 -0
@@ -0,0 +1,159 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ const config_1 = require("./config");
4
+ describe('LLM Config', () => {
5
+ describe('isProviderSupported', () => {
6
+ it('should correctly identify supported providers', () => {
7
+ expect((0, config_1.isProviderSupported)('openai')).toBe(true);
8
+ expect((0, config_1.isProviderSupported)('anthropic')).toBe(true);
9
+ expect((0, config_1.isProviderSupported)('gemini')).toBe(true);
10
+ });
11
+ it('should return false for unsupported providers', () => {
12
+ expect((0, config_1.isProviderSupported)('unsupported-provider')).toBe(false);
13
+ expect((0, config_1.isProviderSupported)('')).toBe(false);
14
+ });
15
+ });
16
+ describe('getProviderById', () => {
17
+ it('should return provider info for valid providers', () => {
18
+ const openaiProvider = (0, config_1.getProviderById)('openai');
19
+ expect(openaiProvider).toBeDefined();
20
+ expect(openaiProvider?.id).toBe('openai');
21
+ expect(openaiProvider?.name).toBe('OpenAI');
22
+ });
23
+ it('should return undefined for invalid providers', () => {
24
+ expect((0, config_1.getProviderById)('unsupported-provider')).toBeUndefined();
25
+ expect((0, config_1.getProviderById)('')).toBeUndefined();
26
+ });
27
+ });
28
+ describe('getModelById', () => {
29
+ it('should return model info for valid model and provider combination', () => {
30
+ const model = (0, config_1.getModelById)('gpt-4.1', 'openai');
31
+ expect(model).toBeDefined();
32
+ expect(model?.id).toBe('gpt-4.1');
33
+ expect(model?.providerId).toBe('openai');
34
+ });
35
+ it('should return undefined for invalid model or provider', () => {
36
+ expect((0, config_1.getModelById)('invalid-model', 'openai')).toBeUndefined();
37
+ expect((0, config_1.getModelById)('gpt-4.1', 'anthropic')).toBeUndefined();
38
+ expect((0, config_1.getModelById)('gpt-4.1', 'invalid-provider')).toBeUndefined();
39
+ });
40
+ });
41
+ describe('getModelsByProvider', () => {
42
+ it('should return models for valid providers', () => {
43
+ const openaiModels = (0, config_1.getModelsByProvider)('openai');
44
+ expect(openaiModels.length).toBeGreaterThan(0);
45
+ expect(openaiModels.every(model => model.providerId === 'openai')).toBe(true);
46
+ });
47
+ it('should return empty array for invalid providers', () => {
48
+ expect((0, config_1.getModelsByProvider)('invalid-provider')).toEqual([]);
49
+ expect((0, config_1.getModelsByProvider)('')).toEqual([]);
50
+ });
51
+ });
52
+ describe('isModelSupported', () => {
53
+ it('should correctly identify supported model/provider combinations', () => {
54
+ expect((0, config_1.isModelSupported)('gpt-4.1', 'openai')).toBe(true);
55
+ expect((0, config_1.isModelSupported)('claude-sonnet-4-20250514', 'anthropic')).toBe(true);
56
+ expect((0, config_1.isModelSupported)('gemini-2.5-pro', 'gemini')).toBe(true);
57
+ });
58
+ it('should return false for unsupported combinations', () => {
59
+ expect((0, config_1.isModelSupported)('gpt-4.1', 'anthropic')).toBe(false);
60
+ expect((0, config_1.isModelSupported)('claude-sonnet-4-20250514', 'openai')).toBe(false);
61
+ expect((0, config_1.isModelSupported)('invalid-model', 'openai')).toBe(false);
62
+ });
63
+ });
64
+ describe('getDefaultSettingsForModel', () => {
65
+ it('should return default settings for valid models', () => {
66
+ const settings = (0, config_1.getDefaultSettingsForModel)('gpt-4.1', 'openai');
67
+ expect(settings).toBeDefined();
68
+ expect(settings.temperature).toBeDefined();
69
+ expect(settings.maxTokens).toBeDefined();
70
+ expect(settings.topP).toBeDefined();
71
+ });
72
+ it('should apply model-specific overrides', () => {
73
+ const gpt4Settings = (0, config_1.getDefaultSettingsForModel)('gpt-4.1', 'openai');
74
+ const gpt4MiniSettings = (0, config_1.getDefaultSettingsForModel)('gpt-4.1-mini', 'openai');
75
+ // These might have different maxTokens based on model capabilities
76
+ expect(gpt4Settings.maxTokens).toBeDefined();
77
+ expect(gpt4MiniSettings.maxTokens).toBeDefined();
78
+ });
79
+ });
80
+ describe('validateLLMSettings', () => {
81
+ it('should return empty array for valid settings', () => {
82
+ const validSettings = {
83
+ temperature: 0.7,
84
+ maxTokens: 1000,
85
+ topP: 0.9,
86
+ frequencyPenalty: 0.5,
87
+ presencePenalty: -0.5,
88
+ stopSequences: ['\\n', 'END'],
89
+ user: 'test-user'
90
+ };
91
+ expect((0, config_1.validateLLMSettings)(validSettings)).toEqual([]);
92
+ });
93
+ it('should validate temperature bounds', () => {
94
+ expect((0, config_1.validateLLMSettings)({ temperature: -0.1 })).toContain('temperature must be a number between 0 and 2');
95
+ expect((0, config_1.validateLLMSettings)({ temperature: 2.1 })).toContain('temperature must be a number between 0 and 2');
96
+ expect((0, config_1.validateLLMSettings)({ temperature: 'invalid' })).toContain('temperature must be a number between 0 and 2');
97
+ });
98
+ it('should validate maxTokens', () => {
99
+ expect((0, config_1.validateLLMSettings)({ maxTokens: 0 })).toContain('maxTokens must be an integer between 1 and 100000');
100
+ expect((0, config_1.validateLLMSettings)({ maxTokens: 100001 })).toContain('maxTokens must be an integer between 1 and 100000');
101
+ expect((0, config_1.validateLLMSettings)({ maxTokens: 1.5 })).toContain('maxTokens must be an integer between 1 and 100000');
102
+ });
103
+ it('should validate topP bounds', () => {
104
+ expect((0, config_1.validateLLMSettings)({ topP: -0.1 })).toContain('topP must be a number between 0 and 1');
105
+ expect((0, config_1.validateLLMSettings)({ topP: 1.1 })).toContain('topP must be a number between 0 and 1');
106
+ });
107
+ it('should validate frequencyPenalty bounds', () => {
108
+ expect((0, config_1.validateLLMSettings)({ frequencyPenalty: -2.1 })).toContain('frequencyPenalty must be a number between -2 and 2');
109
+ expect((0, config_1.validateLLMSettings)({ frequencyPenalty: 2.1 })).toContain('frequencyPenalty must be a number between -2 and 2');
110
+ });
111
+ it('should validate presencePenalty bounds', () => {
112
+ expect((0, config_1.validateLLMSettings)({ presencePenalty: -2.1 })).toContain('presencePenalty must be a number between -2 and 2');
113
+ expect((0, config_1.validateLLMSettings)({ presencePenalty: 2.1 })).toContain('presencePenalty must be a number between -2 and 2');
114
+ });
115
+ it('should validate stopSequences', () => {
116
+ expect((0, config_1.validateLLMSettings)({ stopSequences: 'invalid' })).toContain('stopSequences must be an array');
117
+ expect((0, config_1.validateLLMSettings)({ stopSequences: ['1', '2', '3', '4', '5'] })).toContain('stopSequences can contain at most 4 sequences');
118
+ expect((0, config_1.validateLLMSettings)({ stopSequences: ['valid', ''] })).toContain('stopSequences must contain only non-empty strings');
119
+ expect((0, config_1.validateLLMSettings)({ stopSequences: ['valid', 123] })).toContain('stopSequences must contain only non-empty strings');
120
+ });
121
+ it('should validate user field', () => {
122
+ expect((0, config_1.validateLLMSettings)({ user: 123 })).toContain('user must be a string');
123
+ });
124
+ it('should validate geminiSafetySettings', () => {
125
+ const invalidSettings = { geminiSafetySettings: 'invalid' };
126
+ expect((0, config_1.validateLLMSettings)(invalidSettings)).toContain('geminiSafetySettings must be an array');
127
+ const invalidCategory = {
128
+ geminiSafetySettings: [
129
+ { category: 'INVALID_CATEGORY', threshold: 'BLOCK_NONE' }
130
+ ]
131
+ };
132
+ expect((0, config_1.validateLLMSettings)(invalidCategory)).toContain('geminiSafetySettings[0].category must be a valid Gemini harm category');
133
+ const invalidThreshold = {
134
+ geminiSafetySettings: [
135
+ { category: 'HARM_CATEGORY_HATE_SPEECH', threshold: 'INVALID_THRESHOLD' }
136
+ ]
137
+ };
138
+ expect((0, config_1.validateLLMSettings)(invalidThreshold)).toContain('geminiSafetySettings[0].threshold must be a valid Gemini harm block threshold');
139
+ const validGeminiSettings = {
140
+ geminiSafetySettings: [
141
+ { category: 'HARM_CATEGORY_HATE_SPEECH', threshold: 'BLOCK_NONE' }
142
+ ]
143
+ };
144
+ expect((0, config_1.validateLLMSettings)(validGeminiSettings)).toEqual([]);
145
+ });
146
+ it('should return multiple errors for multiple invalid fields', () => {
147
+ const invalidSettings = {
148
+ temperature: -1,
149
+ maxTokens: 0,
150
+ topP: 2
151
+ };
152
+ const errors = (0, config_1.validateLLMSettings)(invalidSettings);
153
+ expect(errors).toHaveLength(3);
154
+ expect(errors).toContain('temperature must be a number between 0 and 2');
155
+ expect(errors).toContain('maxTokens must be an integer between 1 and 100000');
156
+ expect(errors).toContain('topP must be a number between 0 and 1');
157
+ });
158
+ });
159
+ });
@@ -0,0 +1 @@
1
+ export {};
@@ -0,0 +1,46 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ const fromEnvironment_1 = require("./fromEnvironment");
4
+ describe('fromEnvironment', () => {
5
+ const OLD_ENV = process.env;
6
+ beforeEach(() => {
7
+ jest.resetModules(); // Clear cache
8
+ process.env = { ...OLD_ENV }; // Make a copy
9
+ });
10
+ afterAll(() => {
11
+ process.env = OLD_ENV; // Restore old environment
12
+ });
13
+ it('should retrieve an existing environment variable', async () => {
14
+ process.env.OPENAI_API_KEY = 'test-openai-key';
15
+ const key = await (0, fromEnvironment_1.fromEnvironment)('openai');
16
+ expect(key).toBe('test-openai-key');
17
+ });
18
+ it('should convert provider ID to uppercase', async () => {
19
+ process.env.ANTHROPIC_API_KEY = 'test-anthropic-key';
20
+ const key = await (0, fromEnvironment_1.fromEnvironment)('anthropic');
21
+ expect(key).toBe('test-anthropic-key');
22
+ });
23
+ it('should handle mixed case provider IDs', async () => {
24
+ process.env.GEMINI_API_KEY = 'test-gemini-key';
25
+ const key = await (0, fromEnvironment_1.fromEnvironment)('GeMiNi');
26
+ expect(key).toBe('test-gemini-key');
27
+ });
28
+ it('should return null for a non-existent environment variable', async () => {
29
+ const key = await (0, fromEnvironment_1.fromEnvironment)('nonexistent');
30
+ expect(key).toBeNull();
31
+ });
32
+ it('should return null for empty provider ID', async () => {
33
+ const key = await (0, fromEnvironment_1.fromEnvironment)('');
34
+ expect(key).toBeNull();
35
+ });
36
+ it('should handle special characters in provider ID', async () => {
37
+ process.env['PROVIDER-123_API_KEY'] = 'test-special-key';
38
+ const key = await (0, fromEnvironment_1.fromEnvironment)('provider-123');
39
+ expect(key).toBe('test-special-key');
40
+ });
41
+ it('should return null when environment variable exists but is empty', async () => {
42
+ process.env.EMPTY_API_KEY = '';
43
+ const key = await (0, fromEnvironment_1.fromEnvironment)('empty');
44
+ expect(key).toBeNull(); // Empty string is falsy, so || null returns null
45
+ });
46
+ });
@@ -0,0 +1,19 @@
1
+ import type { ApiProviderId, LLMSettings } from '../llm/types';
2
+ /**
3
+ * Represents a model preset with pre-configured LLM settings
4
+ * optimized for common use cases.
5
+ */
6
+ export interface ModelPreset {
7
+ /** Unique preset identifier, e.g., "gemini-pro-creative-writing" */
8
+ id: string;
9
+ /** User-friendly display name, e.g., "Google Gemini - gemini-1.5-pro-002 (Creative)" */
10
+ displayName: string;
11
+ /** Optional description of the preset's intended use case */
12
+ description?: string;
13
+ /** Provider ID that matches an entry in SUPPORTED_PROVIDERS */
14
+ providerId: ApiProviderId;
15
+ /** Model ID that matches a supported model for the given providerId */
16
+ modelId: string;
17
+ /** Preset-specific LLM settings, can include provider-specific configurations */
18
+ settings: Partial<LLMSettings>;
19
+ }
@@ -0,0 +1,2 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
@@ -0,0 +1 @@
1
+ export * from './prompt';
@@ -0,0 +1,17 @@
1
+ "use strict";
2
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
3
+ if (k2 === undefined) k2 = k;
4
+ var desc = Object.getOwnPropertyDescriptor(m, k);
5
+ if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
6
+ desc = { enumerable: true, get: function() { return m[k]; } };
7
+ }
8
+ Object.defineProperty(o, k2, desc);
9
+ }) : (function(o, m, k, k2) {
10
+ if (k2 === undefined) k2 = k;
11
+ o[k2] = m[k];
12
+ }));
13
+ var __exportStar = (this && this.__exportStar) || function(m, exports) {
14
+ for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
15
+ };
16
+ Object.defineProperty(exports, "__esModule", { value: true });
17
+ __exportStar(require("./prompt"), exports);
@@ -0,0 +1,6 @@
1
+ import { TiktokenModel } from 'js-tiktoken';
2
+ export declare function countTokens(text: string, model?: TiktokenModel): number;
3
+ export declare function getSmartPreview(content: string, config: {
4
+ minLines: number;
5
+ maxLines: number;
6
+ }): string;
@@ -0,0 +1,55 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.countTokens = countTokens;
4
+ exports.getSmartPreview = getSmartPreview;
5
+ const js_tiktoken_1 = require("js-tiktoken");
6
+ const tokenizerCache = new Map();
7
+ function getTokenizer(model) {
8
+ if (tokenizerCache.has(model)) {
9
+ return tokenizerCache.get(model);
10
+ }
11
+ try {
12
+ const tokenizer = (0, js_tiktoken_1.encodingForModel)(model);
13
+ tokenizerCache.set(model, tokenizer);
14
+ return tokenizer;
15
+ }
16
+ catch (error) {
17
+ console.error(`Failed to initialize tokenizer for model ${model}:`, error);
18
+ throw error;
19
+ }
20
+ }
21
+ function countTokens(text, model = 'gpt-4') {
22
+ if (!text)
23
+ return 0;
24
+ try {
25
+ const tokenizer = getTokenizer(model);
26
+ return tokenizer.encode(text).length;
27
+ }
28
+ catch (error) {
29
+ // Fallback to a rough estimate if tokenizer fails for any reason
30
+ return Math.ceil(text.length / 4);
31
+ }
32
+ }
33
+ function getSmartPreview(content, config) {
34
+ const lines = content.split('\n');
35
+ // If the file is not longer than maxLines, return it in full
36
+ if (lines.length <= config.maxLines) {
37
+ return content;
38
+ }
39
+ // Always show at least minLines
40
+ let endLine = config.minLines;
41
+ let emptyLinesCount = lines
42
+ .slice(0, config.minLines)
43
+ .filter((line) => line.trim() === '').length;
44
+ // If we haven't found at least two empty lines, keep looking up to maxLines
45
+ if (emptyLinesCount < 2 && lines.length > config.minLines) {
46
+ for (let i = config.minLines; i < Math.min(lines.length, config.maxLines); i++) {
47
+ if (lines[i].trim() === '') {
48
+ endLine = i + 1; // Include the empty line
49
+ break;
50
+ }
51
+ endLine = i + 1;
52
+ }
53
+ }
54
+ return lines.slice(0, endLine).join('\n') + '\n... (content truncated)';
55
+ }
@@ -0,0 +1 @@
1
+ export {};
@@ -0,0 +1,115 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ const prompt_1 = require("./prompt");
4
+ describe('Prompt Utilities', () => {
5
+ describe('countTokens', () => {
6
+ it('should return 0 for empty string', () => {
7
+ expect((0, prompt_1.countTokens)('')).toBe(0);
8
+ });
9
+ it('should count tokens for simple text', () => {
10
+ const text = 'Hello, world!';
11
+ const count = (0, prompt_1.countTokens)(text);
12
+ expect(count).toBeGreaterThan(0);
13
+ expect(count).toBeLessThan(text.length); // Tokens are typically fewer than characters
14
+ });
15
+ it('should count tokens with default gpt-4 model', () => {
16
+ const text = 'The quick brown fox jumps over the lazy dog';
17
+ const count = (0, prompt_1.countTokens)(text);
18
+ expect(count).toBeGreaterThan(0);
19
+ });
20
+ it('should count tokens with different models', () => {
21
+ const text = 'Testing different models';
22
+ const gpt4Count = (0, prompt_1.countTokens)(text, 'gpt-4');
23
+ const gpt35Count = (0, prompt_1.countTokens)(text, 'gpt-3.5-turbo');
24
+ expect(gpt4Count).toBeGreaterThan(0);
25
+ expect(gpt35Count).toBeGreaterThan(0);
26
+ });
27
+ it('should handle special characters and emojis', () => {
28
+ const text = '🚀 Special chars: @#$% and \n\t newlines';
29
+ const count = (0, prompt_1.countTokens)(text);
30
+ expect(count).toBeGreaterThan(0);
31
+ });
32
+ it('should fallback to estimate for invalid model', () => {
33
+ const text = 'Test fallback behavior';
34
+ const count = (0, prompt_1.countTokens)(text, 'invalid-model');
35
+ // Should fallback to length/4 estimate
36
+ expect(count).toBe(Math.ceil(text.length / 4));
37
+ });
38
+ it('should handle very long text', () => {
39
+ const longText = 'a'.repeat(10000);
40
+ const count = (0, prompt_1.countTokens)(longText);
41
+ expect(count).toBeGreaterThan(0);
42
+ expect(count).toBeLessThan(longText.length);
43
+ });
44
+ });
45
+ describe('getSmartPreview', () => {
46
+ const config = { minLines: 5, maxLines: 10 };
47
+ it('should return full content if shorter than maxLines', () => {
48
+ const content = 'Line 1\nLine 2\nLine 3';
49
+ const preview = (0, prompt_1.getSmartPreview)(content, config);
50
+ expect(preview).toBe(content);
51
+ });
52
+ it('should truncate at maxLines if no empty lines found', () => {
53
+ const lines = Array.from({ length: 20 }, (_, i) => `Line ${i + 1}`);
54
+ const content = lines.join('\n');
55
+ const preview = (0, prompt_1.getSmartPreview)(content, config);
56
+ const previewLines = preview.split('\n');
57
+ // Should extend up to maxLines when no empty lines are found
58
+ expect(previewLines.length).toBe(config.maxLines + 1); // +1 for truncation message
59
+ expect(preview).toContain('... (content truncated)');
60
+ });
61
+ it('should extend to next empty line within maxLines', () => {
62
+ const content = `Line 1
63
+ Line 2
64
+ Line 3
65
+ Line 4
66
+ Line 5
67
+ Line 6
68
+
69
+ Line 8
70
+ Line 9
71
+ Line 10
72
+ Line 11`;
73
+ const preview = (0, prompt_1.getSmartPreview)(content, config);
74
+ const previewLines = preview.split('\n');
75
+ // Should include up to line 7 (the empty line)
76
+ expect(previewLines[6]).toBe('');
77
+ expect(preview).toContain('... (content truncated)');
78
+ });
79
+ it('should handle content exactly at maxLines', () => {
80
+ const lines = Array.from({ length: config.maxLines }, (_, i) => `Line ${i + 1}`);
81
+ const content = lines.join('\n');
82
+ const preview = (0, prompt_1.getSmartPreview)(content, config);
83
+ expect(preview).toBe(content);
84
+ });
85
+ it('should handle empty content', () => {
86
+ const preview = (0, prompt_1.getSmartPreview)('', config);
87
+ expect(preview).toBe('');
88
+ });
89
+ it('should handle content with multiple consecutive empty lines', () => {
90
+ const content = `Line 1
91
+ Line 2
92
+
93
+
94
+ Line 5
95
+ Line 6
96
+
97
+ Line 8
98
+ Line 9
99
+ Line 10
100
+ Line 11`;
101
+ const preview = (0, prompt_1.getSmartPreview)(content, config);
102
+ const previewLines = preview.split('\n');
103
+ // Should stop at first empty line after minLines
104
+ expect(previewLines.length).toBeLessThanOrEqual(config.maxLines + 1);
105
+ expect(preview).toContain('... (content truncated)');
106
+ });
107
+ it('should respect maxLines limit even with empty lines', () => {
108
+ const lines = Array.from({ length: 15 }, (_, i) => i % 3 === 0 ? '' : `Line ${i + 1}`);
109
+ const content = lines.join('\n');
110
+ const preview = (0, prompt_1.getSmartPreview)(content, config);
111
+ const previewLines = preview.split('\n');
112
+ expect(previewLines.length).toBeLessThanOrEqual(config.maxLines + 1);
113
+ });
114
+ });
115
+ });
package/package.json CHANGED
@@ -1,9 +1,21 @@
1
1
  {
2
2
  "name": "genai-lite",
3
- "version": "0.1.0",
3
+ "version": "0.1.3",
4
4
  "description": "A lightweight, portable toolkit for interacting with various Generative AI APIs.",
5
5
  "main": "dist/index.js",
6
6
  "types": "dist/index.d.ts",
7
+ "exports": {
8
+ ".": {
9
+ "import": "./dist/index.js",
10
+ "require": "./dist/index.js",
11
+ "types": "./dist/index.d.ts"
12
+ },
13
+ "./utils": {
14
+ "import": "./dist/utils/index.js",
15
+ "require": "./dist/utils/index.js",
16
+ "types": "./dist/utils/index.d.ts"
17
+ }
18
+ },
7
19
  "author": "Luigi Acerbi <luigi.acerbi@gmail.com>",
8
20
  "license": "MIT",
9
21
  "funding": {
@@ -18,15 +30,21 @@
18
30
  "url": "https://github.com/lacerbi/genai-lite/issues"
19
31
  },
20
32
  "homepage": "https://github.com/lacerbi/genai-lite#readme",
33
+ "files": [
34
+ "dist",
35
+ "src/config/presets.json"
36
+ ],
21
37
  "scripts": {
22
38
  "build": "tsc",
23
39
  "test": "jest --coverage",
24
- "test:watch": "jest --watch"
40
+ "test:watch": "jest --watch",
41
+ "test:e2e": "npm run build && jest --config jest.e2e.config.js"
25
42
  },
26
43
  "dependencies": {
27
- "@anthropic-ai/sdk": "^0.52.0",
44
+ "@anthropic-ai/sdk": "^0.56.0",
28
45
  "@google/genai": "^1.0.1",
29
- "openai": "^4.103.0"
46
+ "js-tiktoken": "^1.0.20",
47
+ "openai": "^5.8.2"
30
48
  },
31
49
  "devDependencies": {
32
50
  "@types/jest": ">=30.0.0",