genai-lite 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +241 -0
- package/dist/index.d.ts +5 -0
- package/dist/index.js +27 -0
- package/dist/llm/LLMService.d.ts +86 -0
- package/dist/llm/LLMService.js +410 -0
- package/dist/llm/clients/AnthropicClientAdapter.d.ts +84 -0
- package/dist/llm/clients/AnthropicClientAdapter.js +281 -0
- package/dist/llm/clients/GeminiClientAdapter.d.ts +83 -0
- package/dist/llm/clients/GeminiClientAdapter.js +266 -0
- package/dist/llm/clients/MockClientAdapter.d.ts +69 -0
- package/dist/llm/clients/MockClientAdapter.js +284 -0
- package/dist/llm/clients/OpenAIClientAdapter.d.ts +69 -0
- package/dist/llm/clients/OpenAIClientAdapter.js +227 -0
- package/dist/llm/clients/adapterErrorUtils.d.ts +26 -0
- package/dist/llm/clients/adapterErrorUtils.js +107 -0
- package/dist/llm/clients/types.d.ts +65 -0
- package/dist/llm/clients/types.js +19 -0
- package/dist/llm/config.d.ts +90 -0
- package/dist/llm/config.js +508 -0
- package/dist/llm/types.d.ts +155 -0
- package/dist/llm/types.js +14 -0
- package/dist/providers/fromEnvironment.d.ts +8 -0
- package/dist/providers/fromEnvironment.js +14 -0
- package/dist/types.d.ts +1 -0
- package/dist/types.js +2 -0
- package/package.json +38 -0
package/LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025 Luigi Acerbi
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
package/README.md
ADDED
@@ -0,0 +1,241 @@
+# genai-lite
+
+A lightweight, portable Node.js/TypeScript library providing a unified interface for interacting with multiple Generative AI providers (OpenAI, Anthropic, Google Gemini, Mistral, and more).
+
+## Features
+
+- 🔌 **Unified API** - Single interface for multiple AI providers
+- 🔐 **Flexible API Key Management** - Bring your own key storage solution
+- 📦 **Zero Electron Dependencies** - Works in any Node.js environment
+- 🎯 **TypeScript First** - Full type safety and IntelliSense support
+- ⚡ **Lightweight** - Minimal dependencies, focused functionality
+- 🛡️ **Provider Normalization** - Consistent responses across different AI APIs
+
+## Installation
+
+```bash
+npm install genai-lite
+```
+
+## Quick Start
+
+```typescript
+import { LLMService, fromEnvironment } from 'genai-lite';
+
+// Create service with environment variable API key provider
+const llmService = new LLMService(fromEnvironment);
+
+// Send a message to OpenAI
+const response = await llmService.sendMessage({
+  providerId: 'openai',
+  modelId: 'gpt-4.1-mini',
+  messages: [
+    { role: 'system', content: 'You are a helpful assistant.' },
+    { role: 'user', content: 'Hello, how are you?' }
+  ]
+});
+
+if (response.object === 'chat.completion') {
+  console.log(response.choices[0].message.content);
+} else {
+  console.error('Error:', response.error.message);
+}
+```
+
+## API Key Management
+
+genai-lite uses a flexible API key provider pattern. You can use the built-in environment variable provider or create your own:
+
+### Environment Variables (Built-in)
+
+```typescript
+import { fromEnvironment } from 'genai-lite';
+
+// Expects environment variables like:
+// OPENAI_API_KEY=sk-...
+// ANTHROPIC_API_KEY=sk-ant-...
+// GEMINI_API_KEY=...
+
+const llmService = new LLMService(fromEnvironment);
+```
+
+### Custom API Key Provider
+
+```typescript
+import { ApiKeyProvider, LLMService } from 'genai-lite';
+
+// Create your own provider
+const myKeyProvider: ApiKeyProvider = async (providerId: string) => {
+  // Fetch from your secure storage, vault, etc.
+  const key = await mySecureStorage.getKey(providerId);
+  return key || null;
+};
+
+const llmService = new LLMService(myKeyProvider);
+```
+
+## Supported Providers & Models
+
+**Note:** Model IDs include version dates for precise model selection. Always use the exact model ID as shown below.
+
+### Anthropic (Claude)
+- **Claude 4** (Latest generation):
+  - `claude-sonnet-4-20250514` - Balanced performance model
+  - `claude-opus-4-20250514` - Most powerful for complex tasks
+- **Claude 3.7**: `claude-3-7-sonnet-20250219` - Advanced reasoning
+- **Claude 3.5**:
+  - `claude-3-5-sonnet-20241022` - Best balance of speed and intelligence
+  - `claude-3-5-haiku-20241022` - Fast and cost-effective
+
+### Google Gemini
+- **Gemini 2.5** (Latest generation):
+  - `gemini-2.5-pro` - Most advanced multimodal capabilities
+  - `gemini-2.5-flash` - Fast with large context window
+  - `gemini-2.5-flash-lite-preview-06-17` - Most cost-effective
+- **Gemini 2.0**:
+  - `gemini-2.0-flash` - High performance multimodal
+  - `gemini-2.0-flash-lite` - Lightweight version
+
+### OpenAI
+- **o4 series**: `o4-mini` - Advanced reasoning model
+- **GPT-4.1 series**:
+  - `gpt-4.1` - Latest GPT-4 with enhanced capabilities
+  - `gpt-4.1-mini` - Cost-effective for most tasks
+  - `gpt-4.1-nano` - Ultra-efficient version
+
+### Mistral
+> **Note:** The official Mistral adapter is under development. Requests made to Mistral models will currently be handled by a mock adapter for API compatibility testing.
+
+- `codestral-2501` - Specialized for code generation
+- `devstral-small-2505` - Compact development-focused model
+
+## Advanced Usage
+
+### Custom Settings
+
+```typescript
+const response = await llmService.sendMessage({
+  providerId: 'anthropic',
+  modelId: 'claude-3-5-haiku-20241022',
+  messages: [{ role: 'user', content: 'Write a haiku' }],
+  settings: {
+    temperature: 0.7,
+    maxTokens: 100,
+    topP: 0.9,
+    stopSequences: ['\n\n']
+  }
+});
+```
+
+### Provider Information
+
+```typescript
+// Get list of supported providers
+const providers = await llmService.getProviders();
+
+// Get models for a specific provider
+const models = await llmService.getModels('anthropic');
+```
+
+### Error Handling
+
+```typescript
+const response = await llmService.sendMessage({
+  providerId: 'openai',
+  modelId: 'gpt-4.1-mini',
+  messages: [{ role: 'user', content: 'Hello' }]
+});
+
+if (response.object === 'error') {
+  switch (response.error.type) {
+    case 'authentication_error':
+      console.error('Invalid API key');
+      break;
+    case 'rate_limit_error':
+      console.error('Rate limit exceeded');
+      break;
+    case 'validation_error':
+      console.error('Invalid request:', response.error.message);
+      break;
+    default:
+      console.error('Error:', response.error.message);
+  }
+}
+```
+
+## Using with Electron
+
+`genai-lite` is designed to work seamlessly within an Electron application's main process, especially when paired with a secure storage solution like `genai-key-storage-lite`.
+
+This is the recommended pattern for both new Electron apps and for migrating from older, integrated versions.
+
+### Example with `genai-key-storage-lite`
+
+Here’s how to create a custom `ApiKeyProvider` that uses `genai-key-storage-lite` to securely retrieve API keys.
+
+```typescript
+// In your Electron app's main process (e.g., main.ts)
+import { app } from 'electron';
+import { ApiKeyServiceMain } from 'genai-key-storage-lite';
+import { LLMService, type ApiKeyProvider } from 'genai-lite';
+
+// 1. Initialize Electron's secure key storage service
+const apiKeyService = new ApiKeyServiceMain(app.getPath("userData"));
+
+// 2. Create a custom ApiKeyProvider that uses the secure storage
+const electronKeyProvider: ApiKeyProvider = async (providerId) => {
+  try {
+    // Use withDecryptedKey to securely access the key only when needed.
+    // The key is passed to the callback and its result is returned.
+    return await apiKeyService.withDecryptedKey(providerId, async (key) => key);
+  } catch {
+    // If key is not found or decryption fails, return null.
+    // LLMService will handle this as an authentication error.
+    return null;
+  }
+};
+
+// 3. Initialize the genai-lite service with our custom provider
+const llmService = new LLMService(electronKeyProvider);
+
+// Now you can use llmService anywhere in your main process.
+```
+
+## TypeScript Support
+
+genai-lite is written in TypeScript and provides comprehensive type definitions:
+
+```typescript
+import type {
+  LLMChatRequest,
+  LLMResponse,
+  LLMFailureResponse,
+  LLMSettings,
+  ApiKeyProvider
+} from 'genai-lite';
+```
+
+## Contributing
+
+Contributions are welcome! Please feel free to submit a Pull Request. For major changes, please open an issue first to discuss what you would like to change.
+
+### Development
+
+```bash
+# Install dependencies
+npm install
+
+# Build the project
+npm run build
+
+# Run tests (when available)
+npm test
+```
+
+## License
+
+This project is licensed under the MIT License - see the LICENSE file for details.
+
+## Acknowledgments
+
+Originally developed as part of the Athanor project, genai-lite has been extracted and made standalone to benefit the wider developer community.
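The README's Mistral note above states that requests to the listed Mistral model IDs are currently routed to the bundled mock adapter. A minimal sketch of what such a call might look like follows; the `'mistral'` provider ID and the mock reply behavior are assumptions, since neither the provider configuration nor the MockClientAdapter source is expanded in this diff.

```typescript
import { LLMService, fromEnvironment } from 'genai-lite';

const llmService = new LLMService(fromEnvironment);

// Assumption: 'mistral' is the provider ID for the Mistral models listed in the
// README. Per the note above, this request would be answered by the mock adapter
// until the official Mistral adapter ships.
const response = await llmService.sendMessage({
  providerId: 'mistral',
  modelId: 'codestral-2501',
  messages: [{ role: 'user', content: 'Write a function that reverses a string.' }]
});

if (response.object === 'chat.completion') {
  // Mock content for API-compatibility testing, not a real Mistral completion.
  console.log(response.choices[0].message.content);
}
```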
package/dist/index.d.ts
ADDED
package/dist/index.js
ADDED
@@ -0,0 +1,27 @@
+"use strict";
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    var desc = Object.getOwnPropertyDescriptor(m, k);
+    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+      desc = { enumerable: true, get: function() { return m[k]; } };
+    }
+    Object.defineProperty(o, k2, desc);
+}) : (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    o[k2] = m[k];
+}));
+var __exportStar = (this && this.__exportStar) || function(m, exports) {
+    for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.fromEnvironment = exports.LLMService = void 0;
+// --- LLM Service ---
+var LLMService_1 = require("./llm/LLMService");
+Object.defineProperty(exports, "LLMService", { enumerable: true, get: function () { return LLMService_1.LLMService; } });
+// Export all core request/response/config types from the LLM module
+__exportStar(require("./llm/types"), exports);
+// Export all client adapter types
+__exportStar(require("./llm/clients/types"), exports);
+// --- API Key Providers ---
+var fromEnvironment_1 = require("./providers/fromEnvironment");
+Object.defineProperty(exports, "fromEnvironment", { enumerable: true, get: function () { return fromEnvironment_1.fromEnvironment; } });
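The compiled CommonJS entry point above re-exports `LLMService`, `fromEnvironment`, and the contents of `./llm/types` and `./llm/clients/types` from the package root. A short consumer sketch, assuming the unexpanded `dist/index.d.ts` mirrors these re-exports:

```typescript
// Value exports come straight from dist/index.js shown above; the type-only
// imports rely on the wildcard re-exports (assumed to be declared in the
// unexpanded dist/index.d.ts).
import { LLMService, fromEnvironment } from 'genai-lite';
import type { LLMChatRequest, ILLMClientAdapter } from 'genai-lite';

const service = new LLMService(fromEnvironment);
```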
package/dist/llm/LLMService.d.ts
ADDED
@@ -0,0 +1,86 @@
+import type { ApiKeyProvider } from '../types';
+import type { LLMChatRequest, LLMResponse, LLMFailureResponse, ProviderInfo, ModelInfo, ApiProviderId } from "./types";
+import type { ILLMClientAdapter } from "./clients/types";
+/**
+ * Main process service for LLM operations
+ *
+ * This service:
+ * - Manages LLM provider client adapters
+ * - Integrates with ApiKeyServiceMain for secure API key access
+ * - Validates requests and applies default settings
+ * - Routes requests to appropriate provider adapters
+ * - Handles errors and provides standardized responses
+ */
+export declare class LLMService {
+    private getApiKey;
+    private clientAdapters;
+    private mockClientAdapter;
+    constructor(getApiKey: ApiKeyProvider);
+    /**
+     * Gets list of supported LLM providers
+     *
+     * @returns Promise resolving to array of provider information
+     */
+    getProviders(): Promise<ProviderInfo[]>;
+    /**
+     * Gets list of supported models for a specific provider
+     *
+     * @param providerId - The provider ID to get models for
+     * @returns Promise resolving to array of model information
+     */
+    getModels(providerId: ApiProviderId): Promise<ModelInfo[]>;
+    /**
+     * Sends a chat message to an LLM provider
+     *
+     * @param request - The LLM chat request
+     * @returns Promise resolving to either success or failure response
+     */
+    sendMessage(request: LLMChatRequest): Promise<LLMResponse | LLMFailureResponse>;
+    /**
+     * Validates basic LLM request structure
+     *
+     * @param request - The request to validate
+     * @returns LLMFailureResponse if validation fails, null if valid
+     */
+    private validateRequestStructure;
+    /**
+     * Merges request settings with model-specific and global defaults
+     *
+     * @param modelId - The model ID to get defaults for
+     * @param providerId - The provider ID to get defaults for
+     * @param requestSettings - Settings from the request
+     * @returns Complete settings object with all required fields
+     */
+    private mergeSettingsForModel;
+    /**
+     * Gets the appropriate client adapter for a provider
+     *
+     * @param providerId - The provider ID
+     * @returns The client adapter to use
+     */
+    private getClientAdapter;
+    /**
+     * Registers a client adapter for a specific provider
+     *
+     * @param providerId - The provider ID
+     * @param adapter - The client adapter implementation
+     */
+    registerClientAdapter(providerId: ApiProviderId, adapter: ILLMClientAdapter): void;
+    /**
+     * Gets information about registered adapters
+     *
+     * @returns Map of provider IDs to adapter info
+     */
+    getRegisteredAdapters(): Map<ApiProviderId, any>;
+    /**
+     * Gets a summary of available providers and their adapter status
+     *
+     * @returns Summary of provider availability
+     */
+    getProviderSummary(): {
+        totalProviders: number;
+        providersWithAdapters: number;
+        availableProviders: string[];
+        unavailableProviders: string[];
+    };
+}
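Besides `sendMessage`, the declaration above exposes introspection and extension points (`getProviderSummary`, `getRegisteredAdapters`, `registerClientAdapter`). A hedged usage sketch follows; the `ILLMClientAdapter` interface is declared in `dist/llm/clients/types.d.ts`, which is not expanded in this diff, so the custom adapter below is only a placeholder, and the `'mistral'` provider ID is an assumption.

```typescript
import { LLMService, fromEnvironment, type ILLMClientAdapter } from 'genai-lite';

const llmService = new LLMService(fromEnvironment);

// Introspection: which providers are configured, and which have an adapter registered.
const summary = llmService.getProviderSummary();
console.log(`${summary.providersWithAdapters}/${summary.totalProviders} providers have adapters`);
console.log('available:', summary.availableProviders);
console.log('unavailable:', summary.unavailableProviders);

// Extension point: register your own adapter for a provider. The concrete shape of
// ILLMClientAdapter lives in dist/llm/clients/types.d.ts (not shown in this diff),
// so myMistralAdapter is left as a placeholder rather than a real implementation.
declare const myMistralAdapter: ILLMClientAdapter;
llmService.registerClientAdapter('mistral', myMistralAdapter); // provider ID assumed
```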