genai-lite 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,508 @@
+ "use strict";
+ // AI Summary: Configuration for LLM module including default settings, supported providers, and models.
+ // Defines operational parameters and available LLM options for the application.
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.SUPPORTED_MODELS = exports.SUPPORTED_PROVIDERS = exports.MODEL_DEFAULT_SETTINGS = exports.PROVIDER_DEFAULT_SETTINGS = exports.DEFAULT_LLM_SETTINGS = exports.ADAPTER_CONFIGS = exports.ADAPTER_CONSTRUCTORS = void 0;
+ exports.getProviderById = getProviderById;
+ exports.getModelById = getModelById;
+ exports.getModelsByProvider = getModelsByProvider;
+ exports.isProviderSupported = isProviderSupported;
+ exports.isModelSupported = isModelSupported;
+ exports.getDefaultSettingsForModel = getDefaultSettingsForModel;
+ exports.validateLLMSettings = validateLLMSettings;
+ const OpenAIClientAdapter_1 = require("./clients/OpenAIClientAdapter");
+ const AnthropicClientAdapter_1 = require("./clients/AnthropicClientAdapter");
+ const GeminiClientAdapter_1 = require("./clients/GeminiClientAdapter");
+ // Placeholder for future imports:
+ // import { MistralClientAdapter } from './clients/MistralClientAdapter';
+ /**
+  * Mapping from provider IDs to their corresponding adapter constructor classes
+  * This enables dynamic registration of client adapters in LLMServiceMain
+  */
+ exports.ADAPTER_CONSTRUCTORS = {
+     openai: OpenAIClientAdapter_1.OpenAIClientAdapter,
+     anthropic: AnthropicClientAdapter_1.AnthropicClientAdapter,
+     gemini: GeminiClientAdapter_1.GeminiClientAdapter,
+     // 'mistral': MistralClientAdapter, // Uncomment and add when Mistral adapter is ready
+ };
+ /**
+  * Optional configuration objects for each adapter
+  * Allows passing parameters like baseURL during instantiation
+  */
+ exports.ADAPTER_CONFIGS = {
+     openai: {
+         baseURL: process.env.OPENAI_API_BASE_URL || undefined,
+     },
+     anthropic: {
+         baseURL: process.env.ANTHROPIC_API_BASE_URL || undefined,
+     },
+     // 'gemini': { /* ... Gemini specific config ... */ },
+     // 'mistral': { /* ... Mistral specific config ... */ },
+ };
+ /**
+  * Default settings applied to all LLM requests unless overridden
+  */
+ exports.DEFAULT_LLM_SETTINGS = {
+     temperature: 0.5,
+     maxTokens: 4096,
+     topP: 0.95,
+     stopSequences: [],
+     frequencyPenalty: 0.0,
+     presencePenalty: 0.0,
+     supportsSystemMessage: true,
+     user: undefined, // Will be filtered out when undefined
+     geminiSafetySettings: [
+         { category: "HARM_CATEGORY_HATE_SPEECH", threshold: "BLOCK_NONE" },
+         { category: "HARM_CATEGORY_SEXUALLY_EXPLICIT", threshold: "BLOCK_NONE" },
+         { category: "HARM_CATEGORY_DANGEROUS_CONTENT", threshold: "BLOCK_NONE" },
+         { category: "HARM_CATEGORY_HARASSMENT", threshold: "BLOCK_NONE" },
+     ],
+ };
+ /**
+  * Per-provider default setting overrides
+  */
+ exports.PROVIDER_DEFAULT_SETTINGS = {
+     openai: {},
+     anthropic: {},
+     gemini: {
+         geminiSafetySettings: [
+             { category: "HARM_CATEGORY_HATE_SPEECH", threshold: "BLOCK_NONE" },
+             { category: "HARM_CATEGORY_SEXUALLY_EXPLICIT", threshold: "BLOCK_NONE" },
+             { category: "HARM_CATEGORY_DANGEROUS_CONTENT", threshold: "BLOCK_NONE" },
+             { category: "HARM_CATEGORY_HARASSMENT", threshold: "BLOCK_NONE" },
+         ],
+     },
+     mistral: {},
+ };
+ /**
+  * Per-model default setting overrides (takes precedence over provider defaults)
+  */
+ exports.MODEL_DEFAULT_SETTINGS = {
+     // OpenAI model-specific overrides
+     "o4-mini": { temperature: 1.0 },
+     // Anthropic model-specific overrides
+     // Gemini model-specific overrides
+     // Mistral model-specific overrides
+ };
+ /**
+  * Supported LLM providers
+  */
+ exports.SUPPORTED_PROVIDERS = [
+     {
+         id: "openai",
+         name: "OpenAI",
+         unsupportedParameters: ["frequencyPenalty"],
+     },
+     {
+         id: "anthropic",
+         name: "Anthropic",
+     },
+     {
+         id: "gemini",
+         name: "Google Gemini",
+     },
+     {
+         id: "mistral",
+         name: "Mistral AI",
+     },
+ ];
+ /**
+  * Supported LLM models with their configurations
+  * ModelInfo is similar to Cline model info
+  * See: https://github.com/cline/cline/blob/main/src/shared/api.ts
+  */
+ exports.SUPPORTED_MODELS = [
+     // Anthropic Models
+     {
+         id: "claude-sonnet-4-20250514",
+         name: "Claude Sonnet 4",
+         providerId: "anthropic",
+         contextWindow: 200000,
+         inputPrice: 3.0,
+         outputPrice: 15.0,
+         description: "Latest Claude Sonnet model with enhanced capabilities",
+         maxTokens: 8192,
+         supportsImages: true,
+         supportsPromptCache: true,
+         cacheWritesPrice: 3.75,
+         cacheReadsPrice: 0.3,
+     },
+     {
+         id: "claude-opus-4-20250514",
+         name: "Claude Opus 4",
+         providerId: "anthropic",
+         contextWindow: 200000,
+         inputPrice: 15.0,
+         outputPrice: 75.0,
+         description: "Most powerful Claude model for highly complex tasks",
+         maxTokens: 8192,
+         supportsImages: true,
+         supportsPromptCache: true,
+         cacheWritesPrice: 18.75,
+         cacheReadsPrice: 1.5,
+     },
+     {
+         id: "claude-3-7-sonnet-20250219",
+         name: "Claude 3.7 Sonnet",
+         providerId: "anthropic",
+         contextWindow: 200000,
+         inputPrice: 3.0,
+         outputPrice: 15.0,
+         description: "Advanced Claude model with improved reasoning",
+         maxTokens: 8192,
+         supportsImages: true,
+         supportsPromptCache: true,
+         cacheWritesPrice: 3.75,
+         cacheReadsPrice: 0.3,
+     },
+     {
+         id: "claude-3-5-sonnet-20241022",
+         name: "Claude 3.5 Sonnet",
+         providerId: "anthropic",
+         contextWindow: 200000,
+         inputPrice: 3.0,
+         outputPrice: 15.0,
+         description: "Best balance of intelligence, speed, and cost",
+         maxTokens: 8192,
+         supportsImages: true,
+         supportsPromptCache: true,
+         cacheWritesPrice: 3.75,
+         cacheReadsPrice: 0.3,
+     },
+     {
+         id: "claude-3-5-haiku-20241022",
+         name: "Claude 3.5 Haiku",
+         providerId: "anthropic",
+         contextWindow: 200000,
+         inputPrice: 0.8,
+         outputPrice: 4.0,
+         description: "Fastest and most cost-effective Claude model",
+         maxTokens: 8192,
+         supportsImages: false,
+         supportsPromptCache: true,
+         cacheWritesPrice: 1.0,
+         cacheReadsPrice: 0.08,
+     },
+     // Google Gemini Models
+     {
+         id: "gemini-2.5-pro",
+         name: "Gemini 2.5 Pro",
+         providerId: "gemini",
+         contextWindow: 1048576,
+         inputPrice: 1.25,
+         outputPrice: 10,
+         description: "Most advanced Gemini model for complex reasoning and multimodal tasks",
+         maxTokens: 65536,
+         supportsImages: true,
+         supportsPromptCache: true,
+         cacheReadsPrice: 0.31,
+     },
+     {
+         id: "gemini-2.5-flash",
+         name: "Gemini 2.5 Flash",
+         providerId: "gemini",
+         contextWindow: 1048576,
+         inputPrice: 0.3,
+         outputPrice: 2.5,
+         description: "Fast, efficient model with large context and reasoning capabilities",
+         maxTokens: 65536,
+         supportsImages: true,
+         supportsPromptCache: true,
+         thinkingConfig: {
+             maxBudget: 24576,
+             outputPrice: 2.5,
+         },
+     },
+     {
+         id: "gemini-2.5-flash-lite-preview-06-17",
+         name: "Gemini 2.5 Flash-Lite Preview",
+         providerId: "gemini",
+         contextWindow: 1000000,
+         inputPrice: 0.1,
+         outputPrice: 0.4,
+ description: "Smallest and most cost effective model, built for at scale usage",
+         maxTokens: 64000,
+         supportsImages: true,
+         supportsPromptCache: true,
+     },
+     {
+         id: "gemini-2.0-flash",
+         name: "Gemini 2.0 Flash",
+         providerId: "gemini",
+         contextWindow: 1048576,
+         inputPrice: 0.1,
+         outputPrice: 0.4,
+         description: "High-performance model with multimodal capabilities",
+         maxTokens: 8192,
+         supportsImages: true,
+         supportsPromptCache: true,
+         cacheReadsPrice: 0.025,
+         cacheWritesPrice: 1.0,
+     },
+     {
+         id: "gemini-2.0-flash-lite",
+         name: "Gemini 2.0 Flash Lite",
+         providerId: "gemini",
+         contextWindow: 1048576,
+         inputPrice: 0.075,
+         outputPrice: 0.3,
+         description: "Lightweight version of Gemini 2.0 Flash",
+         maxTokens: 8192,
+         supportsImages: true,
+         supportsPromptCache: false,
+     },
+     // OpenAI Models
+     {
+         id: "o4-mini",
+         name: "o4-mini",
+         providerId: "openai",
+         contextWindow: 200000,
+         inputPrice: 1.1,
+         outputPrice: 4.4,
+         description: "Advanced reasoning model with high token capacity",
+         maxTokens: 100000,
+         supportsImages: true,
+         supportsPromptCache: true,
+         cacheReadsPrice: 0.275,
+         unsupportedParameters: ["topP"],
+     },
+     {
+         id: "gpt-4.1",
+         name: "GPT-4.1",
+         providerId: "openai",
+         contextWindow: 1047576,
+         inputPrice: 2,
+         outputPrice: 8,
+         description: "Latest GPT-4 model with enhanced capabilities",
+         maxTokens: 32768,
+         supportsImages: true,
+         supportsPromptCache: true,
+         cacheReadsPrice: 0.5,
+     },
+     {
+         id: "gpt-4.1-mini",
+         name: "GPT-4.1 Mini",
+         providerId: "openai",
+         contextWindow: 1047576,
+         inputPrice: 0.4,
+         outputPrice: 1.6,
+         description: "Smaller version of GPT-4.1 for cost-effective tasks",
+         maxTokens: 32768,
+         supportsImages: true,
+         supportsPromptCache: true,
+         cacheReadsPrice: 0.1,
+     },
+     {
+         id: "gpt-4.1-nano",
+         name: "GPT-4.1 Nano",
+         providerId: "openai",
+         contextWindow: 1047576,
+         inputPrice: 0.1,
+         outputPrice: 0.4,
+         description: "Ultra-efficient version of GPT-4.1",
+         maxTokens: 32768,
+         supportsImages: true,
+         supportsPromptCache: true,
+         cacheReadsPrice: 0.025,
+     },
+     // Mistral AI Models
+     {
+         id: "codestral-2501",
+         name: "Codestral",
+         providerId: "mistral",
+         contextWindow: 256000,
+         inputPrice: 0.3,
+         outputPrice: 0.9,
+         description: "Specialized model for code generation and programming tasks",
+         maxTokens: 256000,
+         supportsImages: false,
+         supportsPromptCache: false,
+     },
+     {
+         id: "devstral-small-2505",
+         name: "Devstral Small",
+         providerId: "mistral",
+         contextWindow: 131072,
+         inputPrice: 0.1,
+         outputPrice: 0.3,
+         description: "Compact development-focused model",
+         maxTokens: 128000,
+         supportsImages: false,
+         supportsPromptCache: false,
+     },
+ ];
+ /**
+  * Gets provider information by ID
+  *
+  * @param providerId - The provider ID to look up
+  * @returns The provider info or undefined if not found
+  */
+ function getProviderById(providerId) {
+     return exports.SUPPORTED_PROVIDERS.find((provider) => provider.id === providerId);
+ }
+ /**
+  * Gets model information by ID and provider
+  *
+  * @param modelId - The model ID to look up
+  * @param providerId - The provider ID to filter by
+  * @returns The model info or undefined if not found
+  */
+ function getModelById(modelId, providerId) {
+     return exports.SUPPORTED_MODELS.find((model) => model.id === modelId && (!providerId || model.providerId === providerId));
+ }
+ /**
+  * Gets all models for a specific provider
+  *
+  * @param providerId - The provider ID to filter by
+  * @returns Array of model info for the provider
+  */
+ function getModelsByProvider(providerId) {
+     return exports.SUPPORTED_MODELS.filter((model) => model.providerId === providerId);
+ }
+ /**
+  * Validates if a provider is supported
+  *
+  * @param providerId - The provider ID to validate
+  * @returns True if the provider is supported
+  */
+ function isProviderSupported(providerId) {
+     return exports.SUPPORTED_PROVIDERS.some((provider) => provider.id === providerId);
+ }
+ /**
+  * Validates if a model is supported for a given provider
+  *
+  * @param modelId - The model ID to validate
+  * @param providerId - The provider ID to validate against
+  * @returns True if the model is supported for the provider
+  */
+ function isModelSupported(modelId, providerId) {
+     return exports.SUPPORTED_MODELS.some((model) => model.id === modelId && model.providerId === providerId);
+ }
+ /**
+  * Gets merged default settings for a specific model and provider
+  *
+  * @param modelId - The model ID
+  * @param providerId - The provider ID
+  * @returns Merged default settings with model-specific overrides applied
+  */
+ function getDefaultSettingsForModel(modelId, providerId) {
+     // Base settings: global defaults, then provider-specific, then model-specific overrides
+     const baseDefaults = { ...exports.DEFAULT_LLM_SETTINGS };
+     const providerDefaults = exports.PROVIDER_DEFAULT_SETTINGS[providerId] || {};
+     const modelDefaults = exports.MODEL_DEFAULT_SETTINGS[modelId] || {};
+     // Merge settings in order of precedence
+     const mergedSettings = {
+         ...baseDefaults,
+         ...providerDefaults,
+         ...modelDefaults,
+     };
+     // Override maxTokens from ModelInfo if available
+     const modelInfo = getModelById(modelId, providerId);
+     if (modelInfo && modelInfo.maxTokens !== undefined) {
+         mergedSettings.maxTokens = modelInfo.maxTokens;
+     }
+     // Filter out undefined values before returning
+     return Object.fromEntries(Object.entries(mergedSettings).filter(([_, value]) => value !== undefined));
+ }
+ /**
+  * Valid Gemini harm categories for validation
+  * Only includes categories supported by the API for safety setting rules
+  */
+ const VALID_GEMINI_HARM_CATEGORIES = [
+     "HARM_CATEGORY_HATE_SPEECH",
+     "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+     "HARM_CATEGORY_DANGEROUS_CONTENT",
+     "HARM_CATEGORY_HARASSMENT",
+     "HARM_CATEGORY_CIVIC_INTEGRITY",
+ ];
+ /**
+  * Valid Gemini harm block thresholds for validation
+  */
+ const VALID_GEMINI_HARM_BLOCK_THRESHOLDS = [
+     "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+     "BLOCK_LOW_AND_ABOVE",
+     "BLOCK_MEDIUM_AND_ABOVE",
+     "BLOCK_ONLY_HIGH",
+     "BLOCK_NONE",
+ ];
+ /**
+  * Validates LLM settings values
+  *
+  * @param settings - The settings to validate
+  * @returns Array of validation error messages, empty if valid
+  */
+ function validateLLMSettings(settings) {
+     const errors = [];
+     if (settings.temperature !== undefined) {
+         if (typeof settings.temperature !== "number" ||
+             settings.temperature < 0 ||
+             settings.temperature > 2) {
+             errors.push("temperature must be a number between 0 and 2");
+         }
+     }
+     if (settings.maxTokens !== undefined) {
+         if (!Number.isInteger(settings.maxTokens) ||
+             settings.maxTokens < 1 ||
+             settings.maxTokens > 100000) {
+             errors.push("maxTokens must be an integer between 1 and 100000");
+         }
+     }
+     if (settings.topP !== undefined) {
+         if (typeof settings.topP !== "number" ||
+             settings.topP < 0 ||
+             settings.topP > 1) {
+             errors.push("topP must be a number between 0 and 1");
+         }
+     }
+     if (settings.frequencyPenalty !== undefined) {
+         if (typeof settings.frequencyPenalty !== "number" ||
+             settings.frequencyPenalty < -2 ||
+             settings.frequencyPenalty > 2) {
+             errors.push("frequencyPenalty must be a number between -2 and 2");
+         }
+     }
+     if (settings.presencePenalty !== undefined) {
+         if (typeof settings.presencePenalty !== "number" ||
+             settings.presencePenalty < -2 ||
+             settings.presencePenalty > 2) {
+             errors.push("presencePenalty must be a number between -2 and 2");
+         }
+     }
+     if (settings.stopSequences !== undefined) {
+         if (!Array.isArray(settings.stopSequences)) {
+             errors.push("stopSequences must be an array");
+         }
+         else if (settings.stopSequences.length > 4) {
+             errors.push("stopSequences can contain at most 4 sequences");
+         }
+         else if (settings.stopSequences.some((seq) => typeof seq !== "string" || seq.length === 0)) {
+             errors.push("stopSequences must contain only non-empty strings");
+         }
+     }
+     if (settings.user !== undefined && typeof settings.user !== "string") {
+         errors.push("user must be a string");
+     }
+     if (settings.geminiSafetySettings !== undefined) {
+         if (!Array.isArray(settings.geminiSafetySettings)) {
+             errors.push("geminiSafetySettings must be an array");
+         }
+         else {
+             for (let i = 0; i < settings.geminiSafetySettings.length; i++) {
+                 const setting = settings.geminiSafetySettings[i];
+                 if (!setting || typeof setting !== "object") {
+                     errors.push(`geminiSafetySettings[${i}] must be an object with category and threshold`);
+                     continue;
+                 }
+                 if (!setting.category ||
+                     !VALID_GEMINI_HARM_CATEGORIES.includes(setting.category)) {
+                     errors.push(`geminiSafetySettings[${i}].category must be a valid Gemini harm category`);
+                 }
+                 if (!setting.threshold ||
+                     !VALID_GEMINI_HARM_BLOCK_THRESHOLDS.includes(setting.threshold)) {
+                     errors.push(`geminiSafetySettings[${i}].threshold must be a valid Gemini harm block threshold`);
+                 }
+             }
+         }
+     }
+     return errors;
+ }
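
A short usage sketch makes the merge precedence in getDefaultSettingsForModel concrete: global DEFAULT_LLM_SETTINGS are overlaid by PROVIDER_DEFAULT_SETTINGS, then MODEL_DEFAULT_SETTINGS, and finally maxTokens is taken from the model's ModelInfo entry. The sketch below is illustrative only; the "genai-lite/config" import path is an assumption, since the diff does not show the package's entry point.

// Illustrative sketch; "genai-lite/config" is an assumed import path.
import { getDefaultSettingsForModel, validateLLMSettings } from "genai-lite/config";

// For o4-mini: global defaults, then the (empty) openai provider overrides,
// then the model override { temperature: 1.0 }; maxTokens comes from the
// ModelInfo entry (100000), not the global default (4096).
const defaults = getDefaultSettingsForModel("o4-mini", "openai");
// defaults.temperature === 1.0, defaults.maxTokens === 100000

// validateLLMSettings returns an array of messages; an empty array means valid.
const errors = validateLLMSettings({ temperature: 3, stopSequences: ["\n", ""] });
// ["temperature must be a number between 0 and 2",
//  "stopSequences must contain only non-empty strings"]
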
@@ -0,0 +1,155 @@
+ /**
+  * API provider ID type - represents a unique identifier for an AI provider
+  */
+ export type ApiProviderId = string;
+ /**
+  * Message roles supported by LLM APIs
+  */
+ export type LLMMessageRole = 'user' | 'assistant' | 'system';
+ /**
+  * Individual message in a conversation
+  */
+ export interface LLMMessage {
+     role: LLMMessageRole;
+     content: string;
+ }
+ /**
+  * Gemini harm categories for safety settings
+  * Only includes categories supported by the API for safety setting rules
+  */
+ export type GeminiHarmCategory = 'HARM_CATEGORY_UNSPECIFIED' | 'HARM_CATEGORY_HATE_SPEECH' | 'HARM_CATEGORY_SEXUALLY_EXPLICIT' | 'HARM_CATEGORY_DANGEROUS_CONTENT' | 'HARM_CATEGORY_HARASSMENT' | 'HARM_CATEGORY_CIVIC_INTEGRITY';
+ /**
+  * Gemini harm block thresholds for safety settings
+  */
+ export type GeminiHarmBlockThreshold = 'HARM_BLOCK_THRESHOLD_UNSPECIFIED' | 'BLOCK_LOW_AND_ABOVE' | 'BLOCK_MEDIUM_AND_ABOVE' | 'BLOCK_ONLY_HIGH' | 'BLOCK_NONE';
+ /**
+  * Individual Gemini safety setting
+  */
+ export interface GeminiSafetySetting {
+     category: GeminiHarmCategory;
+     threshold: GeminiHarmBlockThreshold;
+ }
+ /**
+  * Configurable settings for LLM requests
+  */
+ export interface LLMSettings {
+     /** Controls randomness in the response (0.0 to 2.0, typically 0.0 to 1.0) */
+     temperature?: number;
+     /** Maximum number of tokens to generate in the response */
+     maxTokens?: number;
+     /** Controls diversity via nucleus sampling (0.0 to 1.0) */
+     topP?: number;
+     /** Sequences where the API will stop generating further tokens */
+     stopSequences?: string[];
+     /** Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency */
+     frequencyPenalty?: number;
+     /** Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far */
+     presencePenalty?: number;
+     /** A unique identifier representing your end-user, which can help monitor and detect abuse */
+     user?: string;
+     /** Whether the LLM supports a system message (almost all LLMs do nowadays) */
+     supportsSystemMessage?: boolean;
+     /** Gemini-specific safety settings for content filtering */
+     geminiSafetySettings?: GeminiSafetySetting[];
+ }
+ /**
+  * Request structure for chat completion
+  */
+ export interface LLMChatRequest {
+     providerId: ApiProviderId;
+     modelId: string;
+     messages: LLMMessage[];
+     systemMessage?: string;
+     settings?: LLMSettings;
+ }
+ /**
+  * Individual choice in an LLM response
+  */
+ export interface LLMChoice {
+     message: LLMMessage;
+     finish_reason: string | null;
+     index?: number;
+ }
+ /**
+  * Token usage information from LLM APIs
+  */
+ export interface LLMUsage {
+     prompt_tokens?: number;
+     completion_tokens?: number;
+     total_tokens?: number;
+ }
+ /**
+  * Successful response from LLM API
+  */
+ export interface LLMResponse {
+     id: string;
+     provider: ApiProviderId;
+     model: string;
+     created: number;
+     choices: LLMChoice[];
+     usage?: LLMUsage;
+     object: 'chat.completion';
+ }
+ /**
+  * Error information from LLM APIs
+  */
+ export interface LLMError {
+     message: string;
+     code?: string | number;
+     type?: string;
+     param?: string;
+     providerError?: any;
+ }
+ /**
+  * Error response from LLM operations
+  */
+ export interface LLMFailureResponse {
+     provider: ApiProviderId;
+     model?: string;
+     error: LLMError;
+     object: 'error';
+ }
+ /**
+  * Information about a supported LLM provider
+  */
+ export interface ProviderInfo {
+     id: ApiProviderId;
+     name: string;
+     unsupportedParameters?: (keyof LLMSettings)[];
+ }
+ /**
+  * Information about a supported LLM model
+  */
+ export interface ModelInfo {
+     id: string;
+     name: string;
+     providerId: ApiProviderId;
+     contextWindow?: number;
+     inputPrice?: number;
+     outputPrice?: number;
+     supportsSystemMessage?: boolean;
+     description?: string;
+     maxTokens?: number;
+     supportsImages?: boolean;
+     supportsPromptCache: boolean;
+     thinkingConfig?: {
+         maxBudget?: number;
+         outputPrice?: number;
+     };
+     cacheWritesPrice?: number;
+     cacheReadsPrice?: number;
+     unsupportedParameters?: (keyof LLMSettings)[];
+ }
+ /**
+  * IPC channel names for LLM operations
+  */
+ export declare const LLM_IPC_CHANNELS: {
+     readonly GET_PROVIDERS: "llm:get-providers";
+     readonly GET_MODELS: "llm:get-models";
+     readonly SEND_MESSAGE: "llm:send-message";
+     readonly IS_KEY_AVAILABLE: "llm:is-key-available";
+ };
+ /**
+  * Type for LLM IPC channel names
+  */
+ export type LLMIPCChannelName = (typeof LLM_IPC_CHANNELS)[keyof typeof LLM_IPC_CHANNELS];
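
Taken together, these types form a small request/response protocol: an LLMChatRequest goes in, and either an LLMResponse or an LLMFailureResponse comes back, distinguishable by the `object` field. A minimal sketch, assuming the types are importable from the package root (the diff does not show the entry point):

// Illustrative sketch; the import path is an assumption.
import type { LLMChatRequest, LLMResponse, LLMFailureResponse } from "genai-lite";

const request: LLMChatRequest = {
    providerId: "anthropic",
    modelId: "claude-3-5-haiku-20241022",
    messages: [{ role: "user", content: "Summarize this diff." }],
    settings: { temperature: 0.2, maxTokens: 1024 },
};

// `object` is a discriminant: narrowing on it separates success from failure.
function extractText(result: LLMResponse | LLMFailureResponse): string {
    if (result.object === "error") {
        throw new Error(`${result.provider}: ${result.error.message}`);
    }
    return result.choices[0]?.message.content ?? "";
}
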
@@ -0,0 +1,14 @@
+ "use strict";
+ // AI Summary: Core type definitions for the LLM interaction module.
+ // Defines request/response structures, settings, provider/model info, and error handling types.
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.LLM_IPC_CHANNELS = void 0;
+ /**
+  * IPC channel names for LLM operations
+  */
+ exports.LLM_IPC_CHANNELS = {
+     GET_PROVIDERS: 'llm:get-providers',
+     GET_MODELS: 'llm:get-models',
+     SEND_MESSAGE: 'llm:send-message',
+     IS_KEY_AVAILABLE: 'llm:is-key-available',
+ };
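
The compiled constants mirror the declaration above and look intended for Electron-style IPC. A hedged wiring sketch follows; the llmService object and the import path are assumptions, and only the channel names come from this file:

// Illustrative sketch; "genai-lite" import path is assumed.
import { ipcMain } from "electron";
import { LLM_IPC_CHANNELS, type LLMChatRequest } from "genai-lite";

// Assumed service object; the diff does not include LLMServiceMain itself.
declare const llmService: { sendMessage(req: LLMChatRequest): Promise<unknown> };

// Main-process side: one handler per channel constant.
ipcMain.handle(LLM_IPC_CHANNELS.SEND_MESSAGE, (_event, req: LLMChatRequest) =>
    llmService.sendMessage(req));
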
@@ -0,0 +1,8 @@
+ import type { ApiKeyProvider } from "../types";
+ /**
+  * Creates an ApiKeyProvider that sources keys from system environment variables.
+  * It looks for variables in the format: PROVIDERID_API_KEY (e.g., OPENAI_API_KEY).
+  * This is a secure and standard practice for server-side applications.
+  * Note: Provider IDs are converted to uppercase.
+  */
+ export declare const fromEnvironment: ApiKeyProvider;
@@ -0,0 +1,14 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.fromEnvironment = void 0;
+ /**
+  * Creates an ApiKeyProvider that sources keys from system environment variables.
+  * It looks for variables in the format: PROVIDERID_API_KEY (e.g., OPENAI_API_KEY).
+  * This is a secure and standard practice for server-side applications.
+  * Note: Provider IDs are converted to uppercase.
+  */
+ const fromEnvironment = async (providerId) => {
+     const envVarName = `${providerId.toUpperCase()}_API_KEY`;
+     return process.env[envVarName] || null;
+ };
+ exports.fromEnvironment = fromEnvironment;
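
Because the lookup is simply `${providerId.toUpperCase()}_API_KEY`, usage reduces to one awaited call. A small sketch (import path assumed):

// Illustrative sketch; the import path is an assumption.
import { fromEnvironment } from "genai-lite";

// Reads process.env.OPENAI_API_KEY; resolves to null when the variable is unset.
const key = await fromEnvironment("openai");
if (key === null) {
    throw new Error("OPENAI_API_KEY is not set");
}
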
@@ -0,0 +1 @@
+ export type ApiKeyProvider = (providerId: string) => Promise<string | null>;
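
Since ApiKeyProvider is just an async function from provider ID to key (or null), alternative key sources drop in without any registration machinery. A hypothetical in-memory provider, useful for tests (fromMap is not part of the package):

// Hypothetical helper; only the ApiKeyProvider type comes from the package.
import type { ApiKeyProvider } from "genai-lite"; // assumed import path

const fromMap = (keys: Record<string, string>): ApiKeyProvider =>
    async (providerId) => keys[providerId] ?? null;

const provider = fromMap({ openai: "sk-test" });
await provider("openai");  // "sk-test"
await provider("mistral"); // null
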