llmjs2 1.1.1 → 1.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CONFIG_README.md +98 -0
- package/README.md +382 -357
- package/cli.js +195 -0
- package/config.yaml +149 -0
- package/docs/BASIC_USAGE.md +296 -0
- package/docs/CLI.md +455 -0
- package/docs/GET_STARTED.md +129 -0
- package/docs/GUARDRAILS_GUIDE.md +734 -0
- package/docs/README.md +47 -0
- package/docs/ROUTER_GUIDE.md +397 -0
- package/docs/SERVER_MODE.md +350 -0
- package/index.js +199 -246
- package/package.json +43 -34
- package/providers/ollama.js +120 -88
- package/providers/openai.js +104 -0
- package/providers/openrouter.js +113 -79
- package/router.js +248 -0
- package/server.js +186 -0
- package/test.js +246 -0
- package/validate-config.js +87 -0
- package/LICENSE +0 -21
package/router.js
ADDED
|
@@ -0,0 +1,248 @@
|
|
|
1
|
+
const OpenAIProvider = require('./providers/openai');
|
|
2
|
+
const OllamaProvider = require('./providers/ollama');
|
|
3
|
+
const OpenRouterProvider = require('./providers/openrouter');
|
|
4
|
+
|
|
5
|
+
class Router {
  /**
   * Routes completion requests across a list of configured models, with
   * optional pre/post guardrails and simple load balancing.
   *
   * @param {Array<{model_name: string, llm_params: {model: string, api_key?: string, api_base?: string}}>} modelList
   *   Model definitions. `llm_params.model` is "<provider>/<model>" where
   *   provider is one of: openai, ollama, openrouter.
   * @param {string} [strategy='default'] - Auto-routing strategy used when no
   *   model name is requested: 'default' (first model), 'random', or
   *   'sequential' (round-robin).
   */
  constructor(modelList, strategy = 'default') {
    this.modelList = this.normalizeModelList(modelList);
    this.strategy = strategy;
    this.providers = {
      openai: new OpenAIProvider(),
      ollama: new OllamaProvider(),
      openrouter: new OpenRouterProvider()
    };
    this.guardrails = [];
    this.sequentialIndex = 0; // round-robin cursor for the 'sequential' strategy

    // Group models by model_name so duplicate names can be load balanced.
    this.modelsByName = {};
    this.modelList.forEach(model => {
      if (!this.modelsByName[model.model_name]) {
        this.modelsByName[model.model_name] = [];
      }
      this.modelsByName[model.model_name].push(model);
    });
  }

  /**
   * Produce a normalized copy of the model list with env-var API key
   * references resolved. Does not mutate the caller's array.
   */
  normalizeModelList(modelList) {
    return modelList.map(model => ({
      model_name: model.model_name,
      llm_params: {
        model: model.llm_params.model,
        api_key: this.resolveApiKey(model.llm_params.api_key),
        api_base: model.llm_params.api_base
      }
    }));
  }

  /**
   * Resolve "os.environ/VAR" api_key references against process.env.
   * Returns the value unchanged when it is not such a reference, or when the
   * environment variable is unset/empty (so the unresolved reference stays
   * visible to the caller).
   */
  resolveApiKey(apiKey) {
    if (typeof apiKey === 'string' && apiKey.startsWith('os.environ/')) {
      const envVar = apiKey.replace('os.environ/', '');
      return process.env[envVar] || apiKey;
    }
    return apiKey;
  }

  /** Replace the guardrail list (a falsy argument clears it). */
  setGuardrails(guardrails) {
    this.guardrails = guardrails || [];
  }

  /**
   * Pick a model config. With a name: look it up, choosing randomly among
   * duplicates for load balancing. Without a name: defer to the strategy.
   * @throws {Error} when the named model is not configured.
   */
  selectModel(modelName) {
    if (!modelName) {
      // Auto-routing based on strategy
      return this.autoSelectModel();
    }

    // Direct model selection with load balancing
    const availableModels = this.modelsByName[modelName];
    if (!availableModels || availableModels.length === 0) {
      throw new Error(`Model '${modelName}' not found in router configuration`);
    }

    if (availableModels.length === 1) {
      return availableModels[0];
    }

    // Load balancing for multiple models with same name
    const randomIndex = Math.floor(Math.random() * availableModels.length);
    return availableModels[randomIndex];
  }

  /** Choose a model from the full list according to the configured strategy. */
  autoSelectModel() {
    const allModels = this.modelList;

    switch (this.strategy) {
      case 'random': {
        const randomIndex = Math.floor(Math.random() * allModels.length);
        return allModels[randomIndex];
      }

      case 'sequential': {
        // Round-robin: advance the cursor, wrapping at the end of the list.
        const model = allModels[this.sequentialIndex];
        this.sequentialIndex = (this.sequentialIndex + 1) % allModels.length;
        return model;
      }

      case 'default':
      default:
        // Use first available model
        return allModels[0];
    }
  }

  /**
   * Run all pre_call guardrails in order, each receiving the previous one's
   * output. A guardrail returning null/undefined aborts the request.
   * @returns {Promise<object>} the (possibly transformed) input
   * @throws {Error} wrapping the failing guardrail's name and message
   */
  async applyPreGuardrails(processId, input) {
    let currentInput = input;

    for (const guardrail of this.guardrails) {
      if (guardrail.mode === 'pre_call') {
        try {
          const result = await this.executeGuardrail(guardrail, processId, currentInput);
          if (result === null || result === undefined) {
            throw new Error(`Guardrail '${guardrail.name}' returned null/undefined`);
          }
          currentInput = result;
        } catch (error) {
          throw new Error(`Pre-call guardrail '${guardrail.name}' failed: ${error.message}`);
        }
      }
    }

    return currentInput;
  }

  /**
   * Run all post_call guardrails in order over the provider's result.
   * Mirrors applyPreGuardrails: null/undefined from a guardrail is an error.
   */
  async applyPostGuardrails(processId, result) {
    let currentResult = result;

    for (const guardrail of this.guardrails) {
      if (guardrail.mode === 'post_call') {
        try {
          const processed = await this.executeGuardrail(guardrail, processId, currentResult);
          if (processed === null || processed === undefined) {
            throw new Error(`Guardrail '${guardrail.name}' returned null/undefined`);
          }
          currentResult = processed;
        } catch (error) {
          throw new Error(`Post-call guardrail '${guardrail.name}' failed: ${error.message}`);
        }
      }
    }

    return currentResult;
  }

  /**
   * Run a single guardrail. `code` may be a function or a string of source.
   *
   * SECURITY: string guardrails are executed via `new Function`, which is
   * equivalent to eval — only load guardrail code from trusted configuration,
   * never from request input.
   */
  async executeGuardrail(guardrail, processId, data) {
    if (typeof guardrail.code === 'string') {
      // Execute string code as function
      const func = new Function('processId', 'data', `return (${guardrail.code})(processId, data)`);
      return await func(processId, data);
    } else if (typeof guardrail.code === 'function') {
      return await guardrail.code(processId, data);
    } else {
      throw new Error(`Invalid guardrail code for '${guardrail.name}'`);
    }
  }

  /**
   * End-to-end completion: select a model, run pre-guardrails, call the
   * provider, run post-guardrails.
   *
   * @param {object} options - Accepts both snake_case (OpenAI-style) and
   *   camelCase option names; `model` is optional (auto-routing when absent).
   * @returns {Promise<{result: *, selectedModel: string, selectedModelName: string}>}
   */
  async completion(options) {
    const processId = this.generateProcessId();
    let selectedModel;

    try {
      // Select model
      selectedModel = this.selectModel(options.model);
      console.log(`[${processId}] Selected model: ${selectedModel.llm_params.model}`);

      // Prepare input for guardrails. `??` (not `||`) so explicit falsy
      // values such as max_tokens: 0 or frequency_penalty: 0 are preserved.
      const input = {
        model: selectedModel.llm_params.model,
        messages: options.messages || [],
        temperature: options.temperature,
        maxTokens: options.max_tokens ?? options.maxTokens,
        topP: options.top_p ?? options.topP,
        frequencyPenalty: options.frequency_penalty ?? options.frequencyPenalty,
        presencePenalty: options.presence_penalty ?? options.presencePenalty,
        stop: options.stop,
        tools: options.tools,
        toolChoice: options.tool_choice ?? options.toolChoice
      };

      // Apply pre-call guardrails
      const processedInput = await this.applyPreGuardrails(processId, input);

      // Create completion using selected model
      const result = await this.callProvider(selectedModel, processedInput);

      // Apply post-call guardrails
      const finalResult = await this.applyPostGuardrails(processId, result);

      // Return result with selected model information
      return {
        result: finalResult,
        selectedModel: selectedModel.llm_params.model, // Full model name with provider prefix
        selectedModelName: selectedModel.model_name // User-friendly model name
      };

    } catch (error) {
      console.error(`[${processId}] Router error:`, error.message);
      throw error;
    }
  }

  /**
   * Dispatch to the provider encoded in the model string
   * ("<provider>/<model>"). A model string with no slash yields an empty
   * provider name and therefore an "Unknown provider" error.
   * @throws {Error} on an unknown provider prefix.
   */
  async callProvider(modelConfig, input) {
    const modelId = modelConfig.llm_params.model;
    const firstSlashIndex = modelId.indexOf('/');
    const providerName = firstSlashIndex !== -1 ? modelId.substring(0, firstSlashIndex) : '';
    const actualModel = firstSlashIndex !== -1 ? modelId.substring(firstSlashIndex + 1) : modelId;
    const provider = this.providers[providerName];

    if (!provider) {
      throw new Error(`Unknown provider: ${providerName}`);
    }

    // Set the API key for this request (only if provided, otherwise use
    // the provider's default).
    // NOTE(review): this mutates the shared provider instance, so two
    // concurrent completions using different keys for the same provider
    // could race — confirm providers are effectively single-tenant here.
    if (modelConfig.llm_params.api_key !== undefined) {
      provider.apiKey = modelConfig.llm_params.api_key;
    }

    // Call the provider directly with just the actual model name (without provider prefix)
    return await provider.createCompletion(input.messages, {
      model: actualModel,
      temperature: input.temperature,
      maxTokens: input.maxTokens,
      topP: input.topP,
      frequencyPenalty: input.frequencyPenalty,
      presencePenalty: input.presencePenalty,
      stop: input.stop,
      tools: input.tools,
      toolChoice: input.toolChoice
    });
  }

  /** Generate a short unique id used to correlate log lines per request. */
  generateProcessId() {
    // slice() replaces the deprecated String.prototype.substr().
    return `req_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
  }

  // Method to add guardrails dynamically
  addGuardrail(guardrail) {
    this.guardrails.push(guardrail);
  }

  // Method to get available models
  getAvailableModels() {
    return Object.keys(this.modelsByName);
  }

  // Method to get model count for load balancing info
  getModelStats() {
    const stats = {};
    Object.keys(this.modelsByName).forEach(modelName => {
      stats[modelName] = this.modelsByName[modelName].length;
    });
    return stats;
  }
}
|
|
240
|
+
|
|
241
|
+
/**
 * Convenience factory: build a Router for the given model list.
 * @param {Array<object>} modelList - Model definitions (see Router).
 * @param {string} [strategy='default'] - Auto-routing strategy.
 * @returns {Router} a freshly constructed Router instance
 */
function router(modelList, strategy = 'default') {
  const instance = new Router(modelList, strategy);
  return instance;
}
|
|
244
|
+
|
|
245
|
+
// Public API: the Router class plus the router(...) factory shortcut.
module.exports = {
  Router,
  router
};
|
package/server.js
ADDED
|
@@ -0,0 +1,186 @@
|
|
|
1
|
+
const http = require('http');
|
|
2
|
+
const url = require('url');
|
|
3
|
+
|
|
4
|
+
class Server {
  /**
   * Minimal HTTP server exposing an OpenAI-style
   * POST /v1/chat/completions endpoint backed by a Router.
   *
   * @param {{port?: number|string, host?: string, router?: object}} [options]
   *   port/host fall back to PORT/HOST env vars, then 3000/'localhost'.
   */
  constructor(options = {}) {
    // `??` (not `||`) so explicit-but-falsy values (e.g. port 0, meaning
    // "pick a free port") are honored. Note process.env values are strings.
    this.port = options.port ?? process.env.PORT ?? 3000;
    this.host = options.host ?? process.env.HOST ?? 'localhost';
    this.router = options.router;
    this.middlewares = [];
  }

  /**
   * Express-style registration: an object exposing a `completion` function
   * is treated as the router; a plain function is stored as middleware.
   * Anything else is silently ignored.
   */
  use(middleware) {
    if (middleware && typeof middleware.completion === 'function') {
      // It's a router
      this.router = middleware;
    } else if (typeof middleware === 'function') {
      // It's middleware
      this.middlewares.push(middleware);
    }
  }

  /**
   * Top-level request handler: CORS preflight, path routing, body parsing,
   * validation, completion, response. Client mistakes → 400/404; anything
   * unexpected → 500.
   */
  async handleRequest(req, res) {
    try {
      // Permissive CORS so browser clients can call the API directly.
      res.setHeader('Access-Control-Allow-Origin', '*');
      res.setHeader('Access-Control-Allow-Methods', 'GET, POST, OPTIONS');
      res.setHeader('Access-Control-Allow-Headers', 'Content-Type, Authorization');

      if (req.method === 'OPTIONS') {
        res.writeHead(200);
        res.end();
        return;
      }

      // WHATWG URL API (url.parse is deprecated); the base is only needed
      // so relative request paths parse.
      const { pathname } = new URL(req.url, `http://${req.headers.host ?? 'localhost'}`);

      // Only handle chat completions endpoint
      if (pathname !== '/v1/chat/completions' || req.method !== 'POST') {
        this.sendError(res, 404, 'Not Found', 'Endpoint not found');
        return;
      }

      // Malformed or oversized bodies are the client's fault → 400, not 500.
      let body;
      try {
        body = await this.parseBody(req);
      } catch (parseError) {
        this.sendError(res, 400, 'Bad Request', parseError.message);
        return;
      }

      // Validate request
      const validation = this.validateChatRequest(body);
      if (!validation.valid) {
        this.sendError(res, 400, 'Bad Request', validation.error);
        return;
      }

      console.log(`[${new Date().toISOString()}] ${req.method} ${req.url}`);
      // Redact credentials before logging — Authorization may carry API keys.
      const loggedHeaders = { ...req.headers };
      if (loggedHeaders.authorization) {
        loggedHeaders.authorization = '[REDACTED]';
      }
      console.log('Headers:', JSON.stringify(loggedHeaders));
      console.log('Body parsing completed successfully');

      // Process the completion request
      const result = await this.processCompletion(body);

      // Send successful response
      this.sendSuccess(res, result);

    } catch (error) {
      console.error('Server error:', error);
      this.sendError(res, 500, 'Internal Server Error', error.message);
    }
  }

  /**
   * Buffer and JSON-parse the request body.
   * Rejects on malformed JSON, stream errors, or payloads over 1 MB — this
   * endpoint receives untrusted input, so unbounded buffering is a DoS risk.
   * @returns {Promise<object>} the parsed JSON body
   */
  parseBody(req) {
    const MAX_BODY_BYTES = 1024 * 1024; // 1 MB cap on buffered request data
    return new Promise((resolve, reject) => {
      let body = '';
      let received = 0;
      req.on('data', chunk => {
        received += chunk.length;
        if (received > MAX_BODY_BYTES) {
          reject(new Error('Request body too large'));
          req.destroy(); // stop buffering; the promise is already settled
          return;
        }
        body += chunk.toString();
      });
      req.on('end', () => {
        try {
          resolve(JSON.parse(body));
        } catch (error) {
          reject(new Error('Invalid JSON in request body'));
        }
      });
      req.on('error', reject);
    });
  }

  /**
   * Validate the chat-completions payload shape.
   * @returns {{valid: boolean, error?: string}} never throws
   */
  validateChatRequest(body) {
    if (!body) {
      return { valid: false, error: 'Request body is required' };
    }

    if (!body.messages || !Array.isArray(body.messages)) {
      return { valid: false, error: 'messages array is required' };
    }

    if (body.messages.length === 0) {
      return { valid: false, error: 'messages array cannot be empty' };
    }

    for (const message of body.messages) {
      if (!message.role || !['system', 'user', 'assistant'].includes(message.role)) {
        return { valid: false, error: 'Each message must have a valid role (system, user, or assistant)' };
      }
      if (typeof message.content !== 'string') {
        return { valid: false, error: 'Each message must have string content' };
      }
    }

    return { valid: true };
  }

  /**
   * Delegate to the configured router and wrap its result in an
   * OpenAI-style chat.completion envelope (original messages plus the
   * assistant's reply).
   * @throws {Error} when no router has been configured via use().
   */
  async processCompletion(body) {
    if (!this.router) {
      throw new Error('No router configured. Use app.use(router) to add a router.');
    }

    console.log(`Starting completion with model: ${body.model || 'auto-selected'}`);

    const routerResponse = await this.router.completion(body);
    const { result, selectedModel, selectedModelName } = routerResponse;

    // Return format with metadata plus message array
    const assistantMessage = {
      role: 'assistant',
      content: typeof result === 'string' ? result : result.content || ''
    };

    // Include original messages plus assistant response
    const messages = [...(body.messages || []), assistantMessage];

    return {
      id: `chatcmpl-${Date.now()}`,
      object: 'chat.completion',
      created: Math.floor(Date.now() / 1000),
      model: body.model || selectedModel, // Use selected model if auto-selected
      messages: messages
    };
  }

  /** Serialize `data` as JSON and send it with a 200 status. */
  sendSuccess(res, data) {
    const response = JSON.stringify(data);
    res.writeHead(200, {
      'Content-Type': 'application/json',
      'Content-Length': Buffer.byteLength(response)
    });
    res.end(response);
  }

  /** Send an OpenAI-style error envelope: { error: { message, type } }. */
  sendError(res, statusCode, type, message) {
    const error = {
      error: {
        message: message,
        type: type.toLowerCase().replace(/\s+/g, '_') // e.g. "Bad Request" → "bad_request"
      }
    };
    const response = JSON.stringify(error);
    res.writeHead(statusCode, {
      'Content-Type': 'application/json',
      'Content-Length': Buffer.byteLength(response)
    });
    res.end(response);
  }

  /**
   * Start the HTTP server.
   * @param {number} [port] - Overrides the configured port (0 is honored).
   * @param {string} [host] - Overrides the configured host.
   * @param {Function} [callback] - Invoked once listening.
   * @returns {import('http').Server} the underlying Node server
   */
  listen(port, host, callback) {
    const actualPort = port ?? this.port;
    const actualHost = host ?? this.host;

    const server = http.createServer((req, res) => this.handleRequest(req, res));

    server.listen(actualPort, actualHost, () => {
      console.log(`🚀 llmjs2 server running on http://${actualHost}:${actualPort}`);
      if (callback) callback();
    });

    return server;
  }
}
|
|
178
|
+
|
|
179
|
+
/**
 * Factory for a fresh Server instance; each call returns an independent app.
 * @param {{port?: number, host?: string, router?: object}} [options]
 * @returns {Server}
 */
function createApp(options = {}) {
  const app = new Server(options);
  return app;
}
|
|
182
|
+
|
|
183
|
+
// Public API: the Server class and a shared default `app` instance.
// NOTE(review): `app` is created eagerly at require time, so every requirer
// shares one Server; confirm a singleton is intended (createApp itself is
// not exported for callers wanting fresh instances).
module.exports = {
  Server,
  app: createApp()
};
|
package/test.js
ADDED
|
@@ -0,0 +1,246 @@
|
|
|
1
|
+
// Import all modules
|
|
2
|
+
const { completion: llmCompletion, LLMJS2: LLMJS2Class, router, app } = require('./index');
|
|
3
|
+
|
|
4
|
+
// Basic validation tests
|
|
5
|
+
/**
 * Self-test suite for llmjs2: input validation, provider handling, router
 * behavior, guardrails, and server app wiring. Logs one line per test and
 * sets a non-zero exit code when any test fails so CI can detect it.
 */
async function runTests() {
  console.log('Running LLMJS2 comprehensive tests...\n');

  let testCount = 0;
  let passedCount = 0;

  // Fixture helper: a one-entry model list used by several router tests.
  const singleModelList = () => ([
    {
      model_name: 'test',
      llm_params: {
        model: 'ollama/test',
        api_key: 'test-key'
      }
    }
  ]);

  // Test 1: Input validation
  testCount++;
  try {
    await llmCompletion('');
    console.log('❌ Test 1 failed: Should reject empty prompt');
  } catch (error) {
    console.log('✅ Test 1 passed: Empty prompt rejected');
    passedCount++;
  }

  // Test 2: Invalid input type
  testCount++;
  try {
    await llmCompletion(123);
    console.log('❌ Test 2 failed: Should reject non-string/non-object input');
  } catch (error) {
    console.log('✅ Test 2 passed: Invalid input type rejected');
    passedCount++;
  }

  // Test 3: Invalid messages array
  testCount++;
  try {
    await llmCompletion({ messages: [] });
    console.log('❌ Test 3 failed: Should reject empty messages array');
  } catch (error) {
    console.log('✅ Test 3 passed: Empty messages array rejected');
    passedCount++;
  }

  // Test 4: Invalid message format
  testCount++;
  try {
    await llmCompletion({ messages: [{ content: 'test' }] });
    console.log('❌ Test 4 failed: Should reject message without role');
  } catch (error) {
    console.log('✅ Test 4 passed: Message without role rejected');
    passedCount++;
  }

  // Test 5: Invalid provider
  testCount++;
  try {
    // BUGFIX: the import binds the class as LLMJS2Class; the previous
    // `new LLMJS2()` threw a ReferenceError, making this test "pass" for
    // the wrong reason.
    const llm = new LLMJS2Class();
    llm.getProvider('unknown/test');
    console.log('❌ Test 5 failed: Should reject unknown provider');
  } catch (error) {
    console.log('✅ Test 5 passed: Unknown provider rejected -', error.message);
    passedCount++;
  }

  // Test 6: LLMJS2 class instantiation
  testCount++;
  try {
    const llm = new LLMJS2Class();
    console.log('✅ Test 6 passed: LLMJS2 class instantiated successfully');
    passedCount++;
  } catch (error) {
    console.log('❌ Test 6 failed:', error.message);
  }

  // Test 7: Provider availability check (without API keys)
  testCount++;
  try {
    // Temporarily clear API keys for this test
    const originalKeys = {
      openai: process.env.OPENAI_API_KEY,
      ollama: process.env.OLLAMA_API_KEY,
      openrouter: process.env.OPEN_ROUTER_API_KEY
    };

    delete process.env.OPENAI_API_KEY;
    delete process.env.OLLAMA_API_KEY;
    delete process.env.OPEN_ROUTER_API_KEY;

    const llm = new LLMJS2Class();
    const available = llm.getAvailableProviders();

    // Restore original keys — but only if they existed: assigning
    // undefined to a process.env entry stores the literal string
    // "undefined", which would make the key look set afterwards.
    if (originalKeys.openai !== undefined) process.env.OPENAI_API_KEY = originalKeys.openai;
    if (originalKeys.ollama !== undefined) process.env.OLLAMA_API_KEY = originalKeys.ollama;
    if (originalKeys.openrouter !== undefined) process.env.OPEN_ROUTER_API_KEY = originalKeys.openrouter;

    if (available.length === 0) {
      console.log('✅ Test 7 passed: No providers available without API keys');
      passedCount++;
    } else {
      console.log('❌ Test 7 failed: Should have no providers without API keys');
    }
  } catch (error) {
    console.log('❌ Test 7 failed:', error.message);
  }

  // Test 8: Model parsing
  testCount++;
  try {
    const llm = new LLMJS2Class();
    const { provider, model } = llm.parseModel('openai/gpt-3.5-turbo');
    if (provider === 'openai' && model === 'gpt-3.5-turbo') {
      console.log('✅ Test 8 passed: Model parsing works correctly');
      passedCount++;
    } else {
      console.log('❌ Test 8 failed: Model parsing incorrect');
    }
  } catch (error) {
    console.log('❌ Test 8 failed:', error.message);
  }

  // Test 9: Router creation
  testCount++;
  try {
    const route = router(singleModelList(), 'random');
    console.log('✅ Test 9 passed: Router created successfully');
    passedCount++;
  } catch (error) {
    console.log('❌ Test 9 failed:', error.message);
  }

  // Test 10: Router model selection
  testCount++;
  try {
    const route = router(singleModelList());
    const selectedModel = route.selectModel('test');
    if (selectedModel.llm_params.model === 'ollama/test') {
      console.log('✅ Test 10 passed: Router model selection works');
      passedCount++;
    } else {
      console.log('❌ Test 10 failed: Incorrect model selected');
    }
  } catch (error) {
    console.log('❌ Test 10 failed:', error.message);
  }

  // Test 11: Router auto-selection
  testCount++;
  try {
    const modelList = [
      {
        model_name: 'test1',
        llm_params: {
          model: 'ollama/test1',
          api_key: 'test-key'
        }
      },
      {
        model_name: 'test2',
        llm_params: {
          model: 'ollama/test2',
          api_key: 'test-key'
        }
      }
    ];
    const route = router(modelList, 'random');
    const selectedModel = route.autoSelectModel();
    if (selectedModel && selectedModel.llm_params.model.startsWith('ollama/')) {
      console.log('✅ Test 11 passed: Router auto-selection works');
      passedCount++;
    } else {
      console.log('❌ Test 11 failed: Auto-selection failed');
    }
  } catch (error) {
    console.log('❌ Test 11 failed:', error.message);
  }

  // Test 12: Guardrails execution
  testCount++;
  try {
    const route = router(singleModelList());

    route.setGuardrails([
      {
        name: 'test_guardrail',
        mode: 'pre_call',
        code: (processId, input) => {
          return { ...input, messages: [...input.messages, { role: 'system', content: 'Test' }] };
        }
      }
    ]);

    console.log('✅ Test 12 passed: Guardrails set successfully');
    passedCount++;
  } catch (error) {
    console.log('❌ Test 12 failed:', error.message);
  }

  // Test 13: Server app creation
  testCount++;
  try {
    const testApp = app; // Already created
    if (testApp && typeof testApp.use === 'function') {
      console.log('✅ Test 13 passed: Server app created successfully');
      passedCount++;
    } else {
      console.log('❌ Test 13 failed: Server app creation failed');
    }
  } catch (error) {
    console.log('❌ Test 13 failed:', error.message);
  }

  console.log(`\nTest Results: ${passedCount}/${testCount} tests passed`);
  if (passedCount === testCount) {
    console.log('🎉 All tests passed!');
  } else {
    console.log('⚠️ Some tests failed. Please review the implementation.');
    process.exitCode = 1; // non-zero exit so CI notices the failure
  }
}
|
|
244
|
+
|
|
245
|
+
// Run tests immediately on load; report any unhandled rejection to stderr.
runTests().catch(console.error);
|