genai-lite 0.2.0 → 0.2.1
- package/README.md +150 -5
- package/dist/config/presets.json +121 -17
- package/dist/llm/LLMService.d.ts +39 -2
- package/dist/llm/LLMService.js +291 -78
- package/dist/llm/LLMService.prepareMessage.test.d.ts +1 -0
- package/dist/llm/LLMService.prepareMessage.test.js +303 -0
- package/dist/llm/LLMService.sendMessage.preset.test.d.ts +1 -0
- package/dist/llm/LLMService.sendMessage.preset.test.js +153 -0
- package/dist/llm/LLMService.test.js +83 -0
- package/dist/llm/clients/AnthropicClientAdapter.js +64 -10
- package/dist/llm/clients/AnthropicClientAdapter.test.js +7 -1
- package/dist/llm/clients/GeminiClientAdapter.js +70 -11
- package/dist/llm/clients/GeminiClientAdapter.test.js +121 -1
- package/dist/llm/clients/MockClientAdapter.test.js +7 -1
- package/dist/llm/clients/OpenAIClientAdapter.js +26 -10
- package/dist/llm/clients/OpenAIClientAdapter.test.js +7 -1
- package/dist/llm/config.js +112 -2
- package/dist/llm/config.test.js +17 -0
- package/dist/llm/types.d.ts +106 -0
- package/package.json +3 -2
- package/src/config/presets.json +122 -17
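
The headline changes in 0.2.1 are preset-based model selection (`sendMessage` now accepts a `presetId` in place of explicit `providerId`/`modelId`), a new `prepareMessage` method that renders templates with model-context variables, and validation of reasoning/thinking settings against model capabilities. A minimal usage sketch inferred from the diff below — the preset ID comes from the `prepareMessage` JSDoc example, and the constructor wiring with an `ApiKeyProvider` callback is an assumption (referenced by the `API_KEY_ERROR` message, but not shown in this diff):

```typescript
import { LLMService } from 'genai-lite';

// Assumed wiring: an ApiKeyProvider callback returning the key for a provider.
const llm = new LLMService(async (providerId: string) =>
  process.env[`${providerId.toUpperCase()}_API_KEY`]);

// Direct provider/model selection, as in 0.2.0
// (model ID inferred from the preset name; illustrative only).
const direct = await llm.sendMessage({
  providerId: 'anthropic',
  modelId: 'claude-3-7-sonnet-20250219',
  messages: [{ role: 'user', content: 'Hello' }],
});

// New in 0.2.1: a preset bundles providerId, modelId, and default settings.
const viaPreset = await llm.sendMessage({
  presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking',
  messages: [{ role: 'user', content: 'Hello' }],
});
```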
package/dist/llm/LLMService.js
CHANGED
@@ -9,6 +9,7 @@ exports.LLMService = void 0;
 const MockClientAdapter_1 = require("./clients/MockClientAdapter");
 const config_1 = require("./config");
 const presets_json_1 = __importDefault(require("../config/presets.json"));
+const template_1 = require("../prompting/template");
 /**
  * Main process service for LLM operations
  *
@@ -106,65 +107,34 @@ class LLMService {
      * @returns Promise resolving to either success or failure response
      */
     async sendMessage(request) {
-        console.log(`LLMService.sendMessage called
+        console.log(`LLMService.sendMessage called with presetId: ${request.presetId}, provider: ${request.providerId}, model: ${request.modelId}`);
         try {
-            //
-
-
-            return
-                provider: request.providerId,
-                model: request.modelId,
-                error: {
-                    message: `Unsupported provider: ${request.providerId}. Supported providers: ${config_1.SUPPORTED_PROVIDERS.map((p) => p.id).join(", ")}`,
-                    code: "UNSUPPORTED_PROVIDER",
-                    type: "validation_error",
-                },
-                object: "error",
-            };
-            }
-            // Validate model
-            if (!(0, config_1.isModelSupported)(request.modelId, request.providerId)) {
-                const availableModels = (0, config_1.getModelsByProvider)(request.providerId).map((m) => m.id);
-                console.warn(`Unsupported model ${request.modelId} for provider ${request.providerId}. Available: ${availableModels.join(", ")}`);
-                return {
-                    provider: request.providerId,
-                    model: request.modelId,
-                    error: {
-                        message: `Unsupported model: ${request.modelId} for provider: ${request.providerId}. Available models: ${availableModels.join(", ")}`,
-                        code: "UNSUPPORTED_MODEL",
-                        type: "validation_error",
-                    },
-                    object: "error",
-                };
-            }
-            // Get model info for additional validation or settings
-            const modelInfo = (0, config_1.getModelById)(request.modelId, request.providerId);
-            if (!modelInfo) {
-                // This shouldn't happen if validation above passed, but defensive programming
-                console.error(`Model info not found for validated model: ${request.modelId}`);
-                return {
-                    provider: request.providerId,
-                    model: request.modelId,
-                    error: {
-                        message: `Internal error: Model configuration not found for ${request.modelId}`,
-                        code: "MODEL_CONFIG_ERROR",
-                        type: "internal_error",
-                    },
-                    object: "error",
-                };
+            // Resolve model information from preset or direct IDs
+            const resolved = this.resolveModelInfo(request);
+            if (resolved.error) {
+                return resolved.error;
             }
+            const { providerId, modelId, modelInfo, settings: resolvedSettings } = resolved;
+            // Create a proper LLMChatRequest with resolved values
+            const resolvedRequest = {
+                ...request,
+                providerId: providerId,
+                modelId: modelId,
+            };
+            // Provider and model validation already done by resolveModelInfo
             // Validate basic request structure
-            const structureValidationResult = this.validateRequestStructure(
+            const structureValidationResult = this.validateRequestStructure(resolvedRequest);
             if (structureValidationResult) {
                 return structureValidationResult;
             }
-            // Validate settings if provided
-
-
+            // Validate settings if provided
+            const combinedSettings = { ...resolvedSettings, ...request.settings };
+            if (combinedSettings) {
+                const settingsValidationErrors = (0, config_1.validateLLMSettings)(combinedSettings);
                 if (settingsValidationErrors.length > 0) {
                     return {
-                        provider:
-                        model:
+                        provider: providerId,
+                        model: modelId,
                         error: {
                             message: `Invalid settings: ${settingsValidationErrors.join(", ")}`,
                             code: "INVALID_SETTINGS",
@@ -175,11 +145,16 @@ class LLMService {
                 }
             }
             // Apply model-specific defaults and merge with user settings
-            const finalSettings = this.mergeSettingsForModel(
+            const finalSettings = this.mergeSettingsForModel(modelId, providerId, combinedSettings);
+            // Validate reasoning settings for model capabilities
+            const reasoningValidation = this.validateReasoningSettings(modelInfo, finalSettings.reasoning, resolvedRequest);
+            if (reasoningValidation) {
+                return reasoningValidation;
+            }
             // Filter out unsupported parameters based on model and provider configuration
             let filteredSettings = { ...finalSettings }; // Create a mutable copy
             // Get provider info for parameter filtering (modelInfo is already available from earlier validation)
-            const providerInfo = (0, config_1.getProviderById)(
+            const providerInfo = (0, config_1.getProviderById)(providerId);
             const paramsToExclude = new Set();
             // Add provider-level exclusions
             if (providerInfo?.unsupportedParameters) {
@@ -190,7 +165,7 @@ class LLMService {
                 modelInfo.unsupportedParameters.forEach((param) => paramsToExclude.add(param));
             }
             if (paramsToExclude.size > 0) {
-                console.log(`LLMService: Potential parameters to exclude for provider '${
+                console.log(`LLMService: Potential parameters to exclude for provider '${providerId}', model '${modelId}':`, Array.from(paramsToExclude));
             }
             paramsToExclude.forEach((param) => {
                 // Check if the parameter key actually exists in filteredSettings before trying to delete
@@ -199,53 +174,59 @@ class LLMService {
                 // For direct properties of an object, hasOwnProperty is more specific.
                 // Given finalSettings is Required<LLMSettings>, all keys should be present, potentially as undefined.
                 if (param in filteredSettings) {
-                    console.log(`LLMService: Removing excluded parameter '${String(param)}' for provider '${
+                    console.log(`LLMService: Removing excluded parameter '${String(param)}' for provider '${providerId}', model '${modelId}'. Value was:`, filteredSettings[param]);
                     delete filteredSettings[param]; // Cast to allow deletion
                 }
                 else {
                     // This case should ideally not happen if finalSettings truly is Required<LLMSettings>
                     // and mergeSettingsForModel ensures all keys are present (even if undefined).
-                    console.log(`LLMService: Parameter '${String(param)}' marked for exclusion was not found in settings for provider '${
+                    console.log(`LLMService: Parameter '${String(param)}' marked for exclusion was not found in settings for provider '${providerId}', model '${modelId}'.`);
                 }
             });
+            // Handle reasoning settings for models that don't support it
+            // This happens after validateReasoningSettings so we know it's safe to strip
+            if (!modelInfo.reasoning?.supported && filteredSettings.reasoning) {
+                console.log(`LLMService: Removing reasoning settings for non-reasoning model ${modelId}`);
+                delete filteredSettings.reasoning;
+            }
             const internalRequest = {
-                ...
+                ...resolvedRequest,
                 settings: filteredSettings,
             };
             console.log(`Processing LLM request with (potentially filtered) settings:`, {
-                provider:
-                model:
+                provider: providerId,
+                model: modelId,
                 settings: filteredSettings,
-                messageCount:
+                messageCount: resolvedRequest.messages.length,
             });
-            console.log(`Processing LLM request: ${
+            console.log(`Processing LLM request: ${resolvedRequest.messages.length} messages, model: ${modelId}`);
             // Get client adapter
-            const clientAdapter = this.getClientAdapter(
+            const clientAdapter = this.getClientAdapter(providerId);
             // Use ApiKeyProvider to get the API key and make the request
             try {
-                const apiKey = await this.getApiKey(
+                const apiKey = await this.getApiKey(providerId);
                 if (!apiKey) {
                     return {
-                        provider:
-                        model:
+                        provider: providerId,
+                        model: modelId,
                         error: {
-                            message: `API key for provider '${
+                            message: `API key for provider '${providerId}' could not be retrieved. Ensure your ApiKeyProvider is configured correctly.`,
                             code: "API_KEY_ERROR",
                             type: "authentication_error",
                         },
                         object: "error",
                     };
                 }
-                console.log(`Making LLM request with ${clientAdapter.constructor.name} for provider: ${
+                console.log(`Making LLM request with ${clientAdapter.constructor.name} for provider: ${providerId}`);
                 const result = await clientAdapter.sendMessage(internalRequest, apiKey);
-                console.log(`LLM request completed successfully for model: ${
+                console.log(`LLM request completed successfully for model: ${modelId}`);
                 return result;
             }
             catch (error) {
                 console.error("Error in LLMService.sendMessage:", error);
                 return {
-                    provider:
-                    model:
+                    provider: providerId,
+                    model: modelId,
                     error: {
                         message: error instanceof Error
                             ? error.message
@@ -261,8 +242,8 @@ class LLMService {
         catch (error) {
             console.error("Error in LLMService.sendMessage (outer):", error);
             return {
-                provider: request.providerId,
-                model: request.modelId,
+                provider: request.providerId || request.presetId || 'unknown',
+                model: request.modelId || request.presetId || 'unknown',
                 error: {
                     message: error instanceof Error
                         ? error.message
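
Two behaviors worth noting in the rewritten `sendMessage`: per-request settings layer on top of preset settings (`{ ...resolvedSettings, ...request.settings }`), and failure responses now fall back to the preset ID, then `'unknown'`, when no provider/model could be resolved. A sketch of handling the failure shape visible in this diff (the preset ID is hypothetical, chosen to trigger the error path):

```typescript
const result = await llm.sendMessage({
  presetId: 'no-such-preset', // hypothetical, to trigger PRESET_NOT_FOUND
  messages: [{ role: 'user', content: 'Hi' }],
});

if (result.object === 'error') {
  // resolveModelInfo (further down in this diff) reports provider/model
  // as 'unknown' for an unresolvable preset.
  console.error(`${result.error.code}: ${result.error.message}`);
}
```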
@@ -287,8 +268,8 @@ class LLMService {
             !Array.isArray(request.messages) ||
             request.messages.length === 0) {
             return {
-                provider: request.providerId,
-                model: request.modelId,
+                provider: request.providerId || request.presetId || 'unknown',
+                model: request.modelId || request.presetId || 'unknown',
                 error: {
                     message: "Request must contain at least one message",
                     code: "INVALID_REQUEST",
@@ -302,8 +283,8 @@ class LLMService {
             const message = request.messages[i];
             if (!message.role || !message.content) {
                 return {
-                    provider: request.providerId,
-                    model: request.modelId,
+                    provider: request.providerId || ('presetId' in request ? request.presetId : undefined) || 'unknown',
+                    model: request.modelId || ('presetId' in request ? request.presetId : undefined) || 'unknown',
                     error: {
                         message: `Message at index ${i} must have both 'role' and 'content' properties`,
                         code: "INVALID_MESSAGE",
@@ -314,8 +295,8 @@ class LLMService {
             }
             if (!["user", "assistant", "system"].includes(message.role)) {
                 return {
-                    provider: request.providerId,
-                    model: request.modelId,
+                    provider: request.providerId || ('presetId' in request ? request.presetId : undefined) || 'unknown',
+                    model: request.modelId || ('presetId' in request ? request.presetId : undefined) || 'unknown',
                     error: {
                         message: `Invalid message role '${message.role}' at index ${i}. Must be 'user', 'assistant', or 'system'`,
                         code: "INVALID_MESSAGE_ROLE",
@@ -327,6 +308,42 @@ class LLMService {
         }
         return null; // Request is valid
     }
+    /**
+     * Validates reasoning settings against model capabilities
+     *
+     * @param modelInfo - The model information
+     * @param reasoning - The reasoning settings to validate
+     * @param request - The original request for error context
+     * @returns LLMFailureResponse if validation fails, null if valid
+     */
+    validateReasoningSettings(modelInfo, reasoning, request) {
+        // If no reasoning settings provided, nothing to validate
+        if (!reasoning) {
+            return null;
+        }
+        // If model doesn't support reasoning
+        if (!modelInfo.reasoning?.supported) {
+            // Check if user is trying to enable reasoning
+            const tryingToEnableReasoning = reasoning.enabled === true ||
+                reasoning.effort !== undefined ||
+                (reasoning.maxTokens !== undefined && reasoning.maxTokens > 0);
+            if (tryingToEnableReasoning) {
+                return {
+                    provider: request.providerId,
+                    model: request.modelId,
+                    error: {
+                        message: `Model ${request.modelId} does not support reasoning/thinking`,
+                        type: 'validation_error',
+                        code: 'reasoning_not_supported'
+                    },
+                    object: 'error'
+                };
+            }
+            // Otherwise, user is explicitly disabling reasoning - this is fine
+            // The reasoning settings will be stripped later
+        }
+        return null;
+    }
     /**
      * Merges request settings with model-specific and global defaults
      *
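
The new `validateReasoningSettings` guard only rejects attempts to enable reasoning on a model that lacks it (`enabled: true`, any `effort`, or a positive `maxTokens`); an explicit disable passes validation, and the settings are stripped before dispatch. Sketched below with a hypothetical non-reasoning model ID:

```typescript
// Rejected: tries to turn reasoning on for a model without support.
const rejected = await llm.sendMessage({
  providerId: 'openai',
  modelId: 'gpt-4o', // hypothetical non-reasoning model
  messages: [{ role: 'user', content: 'Hi' }],
  settings: { reasoning: { enabled: true } },
});
// rejected.error.code === 'reasoning_not_supported'

// Accepted: explicitly disabling reasoning is a no-op; the settings are
// removed from filteredSettings before reaching the client adapter.
const accepted = await llm.sendMessage({
  providerId: 'openai',
  modelId: 'gpt-4o', // hypothetical non-reasoning model
  messages: [{ role: 'user', content: 'Hi' }],
  settings: { reasoning: { enabled: false } },
});
```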
@@ -351,6 +368,10 @@ class LLMService {
                 modelDefaults.supportsSystemMessage,
             geminiSafetySettings: requestSettings?.geminiSafetySettings ??
                 modelDefaults.geminiSafetySettings,
+            reasoning: {
+                ...modelDefaults.reasoning,
+                ...requestSettings?.reasoning,
+            },
         };
         // Log the final settings for debugging
         console.log(`Merged settings for ${providerId}/${modelId}:`, {
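
Note that the `reasoning` block is spread one level deep, so request fields override model defaults field by field rather than replacing the whole object. A tiny illustration, with assumed default values:

```typescript
// Assumed model default and request settings (illustrative values only):
const modelDefaultReasoning = { enabled: true, effort: 'medium' };
const requestedReasoning = { effort: 'high' };
// Shallow spread, as in mergeSettingsForModel above:
const merged = { ...modelDefaultReasoning, ...requestedReasoning };
// => { enabled: true, effort: 'high' }
```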
@@ -362,6 +383,7 @@ class LLMService {
             presencePenalty: mergedSettings.presencePenalty,
             hasUser: !!mergedSettings.user,
             geminiSafetySettingsCount: mergedSettings.geminiSafetySettings.length,
+            reasoning: mergedSettings.reasoning,
         });
         return mergedSettings;
     }
@@ -439,5 +461,196 @@ class LLMService {
     getPresets() {
         return [...this.presets]; // Return a copy to prevent external modification
     }
+    /**
+     * Resolves model information from either a preset ID or provider/model IDs
+     *
+     * @private
+     * @param options Options containing either presetId or providerId/modelId
+     * @returns Resolved model info and settings or error response
+     */
+    resolveModelInfo(options) {
+        // If presetId is provided, use it
+        if (options.presetId) {
+            const preset = this.presets.find(p => p.id === options.presetId);
+            if (!preset) {
+                return {
+                    error: {
+                        provider: 'unknown',
+                        model: 'unknown',
+                        error: {
+                            message: `Preset not found: ${options.presetId}`,
+                            code: 'PRESET_NOT_FOUND',
+                            type: 'validation_error',
+                        },
+                        object: 'error',
+                    }
+                };
+            }
+            const modelInfo = (0, config_1.getModelById)(preset.modelId, preset.providerId);
+            if (!modelInfo) {
+                return {
+                    error: {
+                        provider: preset.providerId,
+                        model: preset.modelId,
+                        error: {
+                            message: `Model not found for preset: ${options.presetId}`,
+                            code: 'MODEL_NOT_FOUND',
+                            type: 'validation_error',
+                        },
+                        object: 'error',
+                    }
+                };
+            }
+            // Merge preset settings with user settings
+            const settings = {
+                ...preset.settings,
+                ...options.settings
+            };
+            return {
+                providerId: preset.providerId,
+                modelId: preset.modelId,
+                modelInfo,
+                settings
+            };
+        }
+        // Otherwise, use providerId and modelId
+        if (!options.providerId || !options.modelId) {
+            return {
+                error: {
+                    provider: options.providerId || 'unknown',
+                    model: options.modelId || 'unknown',
+                    error: {
+                        message: 'Either presetId or both providerId and modelId must be provided',
+                        code: 'INVALID_MODEL_SELECTION',
+                        type: 'validation_error',
+                    },
+                    object: 'error',
+                }
+            };
+        }
+        // Check if provider is supported first
+        if (!(0, config_1.isProviderSupported)(options.providerId)) {
+            return {
+                error: {
+                    provider: options.providerId,
+                    model: options.modelId,
+                    error: {
+                        message: `Unsupported provider: ${options.providerId}. Supported providers: ${config_1.SUPPORTED_PROVIDERS.map((p) => p.id).join(', ')}`,
+                        code: 'UNSUPPORTED_PROVIDER',
+                        type: 'validation_error',
+                    },
+                    object: 'error',
+                }
+            };
+        }
+        const modelInfo = (0, config_1.getModelById)(options.modelId, options.providerId);
+        if (!modelInfo) {
+            return {
+                error: {
+                    provider: options.providerId,
+                    model: options.modelId,
+                    error: {
+                        message: `Unsupported model: ${options.modelId} for provider: ${options.providerId}`,
+                        code: 'UNSUPPORTED_MODEL',
+                        type: 'validation_error',
+                    },
+                    object: 'error',
+                }
+            };
+        }
+        return {
+            providerId: options.providerId,
+            modelId: options.modelId,
+            modelInfo,
+            settings: options.settings
+        };
+    }
+    /**
+     * Prepares messages with model context for template rendering
+     *
+     * This method resolves model information from either a preset or direct provider/model IDs,
+     * then renders a template with model context variables injected, or returns pre-built messages
+     * with the model context separately.
+     *
+     * @param options Options for preparing messages
+     * @returns Promise resolving to prepared messages and model context
+     *
+     * @example
+     * ```typescript
+     * const { messages } = await llm.prepareMessage({
+     *   template: 'Help me {{ thinking_enabled ? "think through" : "solve" }} this: {{ problem }}',
+     *   variables: { problem: 'complex algorithm' },
+     *   presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking'
+     * });
+     * ```
+     */
+    async prepareMessage(options) {
+        console.log('LLMService.prepareMessage called');
+        // Validate input
+        if (!options.template && !options.messages) {
+            return {
+                provider: 'unknown',
+                model: 'unknown',
+                error: {
+                    message: 'Either template or messages must be provided',
+                    code: 'INVALID_INPUT',
+                    type: 'validation_error',
+                },
+                object: 'error',
+            };
+        }
+        // Resolve model information
+        const resolved = this.resolveModelInfo(options);
+        if (resolved.error) {
+            return resolved.error;
+        }
+        const { providerId, modelId, modelInfo, settings } = resolved;
+        // Merge settings with model defaults
+        const mergedSettings = this.mergeSettingsForModel(modelId, providerId, settings);
+        // Create model context
+        const modelContext = {
+            thinking_enabled: !!(modelInfo.reasoning?.supported &&
+                (mergedSettings.reasoning?.enabled === true ||
+                    (modelInfo.reasoning?.enabledByDefault && mergedSettings.reasoning?.enabled !== false))),
+            thinking_available: !!modelInfo.reasoning?.supported,
+            model_id: modelId,
+            provider_id: providerId,
+            reasoning_effort: mergedSettings.reasoning?.effort,
+            reasoning_max_tokens: mergedSettings.reasoning?.maxTokens,
+        };
+        // Prepare messages
+        let messages;
+        if (options.template) {
+            // Render template with variables and model context
+            const allVariables = {
+                ...options.variables,
+                ...modelContext, // Inject model context at root level
+            };
+            try {
+                const content = (0, template_1.renderTemplate)(options.template, allVariables);
+                messages = [{ role: 'user', content }];
+            }
+            catch (error) {
+                return {
+                    provider: providerId,
+                    model: modelId,
+                    error: {
+                        message: `Template rendering failed: ${error instanceof Error ? error.message : 'Unknown error'}`,
+                        code: 'TEMPLATE_ERROR',
+                        type: 'validation_error',
+                    },
+                    object: 'error',
+                };
+            }
+        }
+        else {
+            // Use pre-built messages
+            messages = options.messages;
+        }
+        return {
+            messages,
+            modelContext,
+        };
+    }
 }
 exports.LLMService = LLMService;
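
Since `prepareMessage` returns either `{ messages, modelContext }` or a failure response, callers can branch on the result and feed the rendered messages straight back into `sendMessage`. Expanding the JSDoc example above (same preset ID; the `'messages' in prepared` check is an assumption based on the two return shapes in this diff):

```typescript
const prepared = await llm.prepareMessage({
  template: 'Help me {{ thinking_enabled ? "think through" : "solve" }} this: {{ problem }}',
  variables: { problem: 'complex algorithm' },
  presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking',
});

if ('messages' in prepared) {
  // modelContext exposes thinking_enabled, thinking_available, model_id,
  // provider_id, reasoning_effort, and reasoning_max_tokens to callers.
  console.log(prepared.modelContext.thinking_enabled);
  const response = await llm.sendMessage({
    presetId: 'anthropic-claude-3-7-sonnet-20250219-thinking',
    messages: prepared.messages,
  });
}
```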
package/dist/llm/LLMService.prepareMessage.test.d.ts
ADDED
@@ -0,0 +1 @@
+export {};