llmjs2 1.3.1 ā 1.3.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +91 -5
- package/index.js +18 -0
- package/logger.js +115 -0
- package/package.json +4 -1
- package/router.js +26 -3
- package/server.js +12 -5
- package/test-completion.js +98 -0
- package/CONFIG_README.md +0 -98
- package/validate-config.js +0 -87
package/README.md
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
# llmjs2
|
|
2
2
|
|
|
3
|
-
A unified
|
|
3
|
+
A unified Node.js library for connecting to multiple Large Language Model (LLM) providers: OpenAI, Ollama, and OpenRouter.
|
|
4
4
|
|
|
5
5
|
**Features:**
|
|
6
6
|
- **Unified API**: Single interface for OpenAI, Ollama, and OpenRouter
|
|
@@ -37,11 +37,8 @@ npm install -g llmjs2
|
|
|
37
37
|
Try the sample configuration:
|
|
38
38
|
|
|
39
39
|
```bash
|
|
40
|
-
# Validate configuration
|
|
41
|
-
node validate-config.js
|
|
42
|
-
|
|
43
40
|
# Start server with sample config
|
|
44
|
-
|
|
41
|
+
llmjs2 --config config.yaml --port 3001
|
|
45
42
|
|
|
46
43
|
# Test the API
|
|
47
44
|
curl -X POST http://localhost:3001/v1/chat/completions \
|
|
@@ -59,8 +56,34 @@ curl -X POST http://localhost:3001/v1/chat/completions \
|
|
|
59
56
|
# {"role": "assistant", "content": "Hi there!"}
|
|
60
57
|
# ]
|
|
61
58
|
# }
|
|
59
|
+
|
|
60
|
+
## Programmatic Configuration
|
|
61
|
+
|
|
62
|
+
For advanced users, you can configure llmjs2 router programmatically instead of using YAML:
|
|
63
|
+
|
|
64
|
+
```bash
|
|
65
|
+
npm run router:example
|
|
66
|
+
```
|
|
67
|
+
|
|
68
|
+
See `server-config.js` for a complete example of configuring models, guardrails, and routing in JavaScript code, with direct completion usage.
|
|
69
|
+
|
|
70
|
+
## AI Chat App
|
|
71
|
+
|
|
72
|
+
Experience llmjs2 with a simple terminal-based chat interface:
|
|
73
|
+
|
|
74
|
+
```bash
|
|
75
|
+
npm run chat
|
|
62
76
|
```
|
|
63
77
|
|
|
78
|
+
Features:
|
|
79
|
+
- Conversational chat with message history
|
|
80
|
+
- Automatic model routing (random selection)
|
|
81
|
+
- Shows which model was used for each response
|
|
82
|
+
- Simple guardrails (logging)
|
|
83
|
+
- Graceful exit with "exit", "quit", or "bye"
|
|
84
|
+
|
|
85
|
+
The chat app uses the same router configuration as the programmatic examples but provides an interactive chat experience.
|
|
86
|
+
|
|
64
87
|
See `CONFIG_README.md` for detailed configuration examples.
|
|
65
88
|
|
|
66
89
|
## Quick Start
|
|
@@ -369,6 +392,69 @@ Run the test suite:
|
|
|
369
392
|
npm test
|
|
370
393
|
```
|
|
371
394
|
|
|
395
|
+
Test basic completion functionality:
|
|
396
|
+
|
|
397
|
+
```bash
|
|
398
|
+
npm run test:completion
|
|
399
|
+
```
|
|
400
|
+
|
|
401
|
+
## Logging
|
|
402
|
+
|
|
403
|
+
Configure logging levels for LLM provider requests and responses:
|
|
404
|
+
|
|
405
|
+
```bash
|
|
406
|
+
# Show all logs (DEBUG, WARN, INFO, ERROR)
|
|
407
|
+
LLMJS2_LOG=debug node your-script.js
|
|
408
|
+
|
|
409
|
+
# Show INFO and ERROR logs only
|
|
410
|
+
LLMJS2_LOG=info node your-script.js
|
|
411
|
+
|
|
412
|
+
# Show WARN, INFO, and ERROR logs
|
|
413
|
+
LLMJS2_LOG=warn node your-script.js
|
|
414
|
+
|
|
415
|
+
# Show ERROR logs only
|
|
416
|
+
LLMJS2_LOG=error node your-script.js
|
|
417
|
+
|
|
418
|
+
# Examples
|
|
419
|
+
LLMJS2_LOG=debug npm run chat
|
|
420
|
+
LLMJS2_LOG=info npm run test:completion
|
|
421
|
+
```
|
|
422
|
+
|
|
423
|
+
### Log Levels
|
|
424
|
+
|
|
425
|
+
- **DEBUG**: All logs including detailed request/response data
|
|
426
|
+
- **INFO**: LLM provider communication and important events
|
|
427
|
+
- **WARN**: Warnings and important notifications
|
|
428
|
+
- **ERROR**: Errors and failures
|
|
429
|
+
|
|
430
|
+
### Log Format
|
|
431
|
+
|
|
432
|
+
```
|
|
433
|
+
[TIMESTAMP] [LEVEL] MESSAGE
|
|
434
|
+
DATA_OBJECT (JSON formatted)
|
|
435
|
+
```
|
|
436
|
+
|
|
437
|
+
### Example Output
|
|
438
|
+
|
|
439
|
+
```
|
|
440
|
+
[2026-04-13T10:14:58.123Z] [INFO] LLMJS2 š¤ Sending to LLM provider
|
|
441
|
+
{
|
|
442
|
+
"source": "completion",
|
|
443
|
+
"provider": "ollama",
|
|
444
|
+
"model": "minimax-m2.5:cloud",
|
|
445
|
+
"apiKey": "2620e31dea...",
|
|
446
|
+
"messages": [{"role": "user", "content": "Hello!"}]
|
|
447
|
+
}
|
|
448
|
+
|
|
449
|
+
[2026-04-13T10:15:01.456Z] [INFO] LLMJS2 š„ Received from LLM provider
|
|
450
|
+
{
|
|
451
|
+
"source": "completion",
|
|
452
|
+
"content": "Hello! How can I help you?",
|
|
453
|
+
"role": "assistant",
|
|
454
|
+
"usage": {"prompt_eval_count": 10, "eval_count": 15}
|
|
455
|
+
}
|
|
456
|
+
```
|
|
457
|
+
|
|
372
458
|
## License
|
|
373
459
|
|
|
374
460
|
MIT
|
package/index.js
CHANGED
|
@@ -3,6 +3,7 @@ const OllamaProvider = require('./providers/ollama');
|
|
|
3
3
|
const OpenRouterProvider = require('./providers/openrouter');
|
|
4
4
|
const { router } = require('./router');
|
|
5
5
|
const { app } = require('./server');
|
|
6
|
+
const logger = require('./logger');
|
|
6
7
|
|
|
7
8
|
class LLMJS2 {
|
|
8
9
|
constructor(config = {}) {
|
|
@@ -166,8 +167,25 @@ class LLMJS2 {
|
|
|
166
167
|
provider.timeout = options.timeout;
|
|
167
168
|
}
|
|
168
169
|
|
|
170
|
+
// Log request information
|
|
171
|
+
const apiKeyPreview = provider.apiKey ? provider.apiKey.substring(0, 10) + '...' : 'none';
|
|
172
|
+
logger.info('LLMJS2 š¤ Sending to LLM provider', {
|
|
173
|
+
source: 'completion',
|
|
174
|
+
provider: provider.constructor.name.replace('Provider', '').toLowerCase(),
|
|
175
|
+
model: finalModel,
|
|
176
|
+
apiKey: apiKeyPreview,
|
|
177
|
+
messages: messages,
|
|
178
|
+
options: { ...options, model: finalModel }
|
|
179
|
+
});
|
|
180
|
+
|
|
169
181
|
const result = await provider.createCompletion(messages, { ...options, model: finalModel });
|
|
170
182
|
|
|
183
|
+
// Log response information
|
|
184
|
+
logger.info('LLMJS2 š„ Received from LLM provider', {
|
|
185
|
+
source: 'completion',
|
|
186
|
+
...result
|
|
187
|
+
});
|
|
188
|
+
|
|
171
189
|
return result.content;
|
|
172
190
|
|
|
173
191
|
} catch (error) {
|
package/logger.js
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Simple logging utility for llmjs2
|
|
5
|
+
* Provides structured logging with timestamps and levels
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
class Logger {
|
|
9
|
+
constructor() {
|
|
10
|
+
this.levels = {
|
|
11
|
+
DEBUG: 0, // Shows all logs
|
|
12
|
+
INFO: 1, // Shows INFO and ERROR only
|
|
13
|
+
WARN: 2, // Shows WARN, INFO, and ERROR
|
|
14
|
+
ERROR: 3, // Shows ERROR only
|
|
15
|
+
FATAL: 4
|
|
16
|
+
};
|
|
17
|
+
|
|
18
|
+
// Set level based on LLMJS2_LOG environment variable
|
|
19
|
+
this.setLevelFromEnv();
|
|
20
|
+
}
|
|
21
|
+
|
|
22
|
+
setLevelFromEnv() {
|
|
23
|
+
const envLevel = process.env.LLMJS2_LOG;
|
|
24
|
+
if (!envLevel) {
|
|
25
|
+
// Default to no logging if not set
|
|
26
|
+
this.currentLevel = 99; // Higher than any level
|
|
27
|
+
return;
|
|
28
|
+
}
|
|
29
|
+
|
|
30
|
+
const upperEnvLevel = envLevel.toUpperCase();
|
|
31
|
+
if (this.levels[upperEnvLevel] !== undefined) {
|
|
32
|
+
this.currentLevel = this.levels[upperEnvLevel];
|
|
33
|
+
} else {
|
|
34
|
+
// Default to INFO if invalid value
|
|
35
|
+
this.currentLevel = this.levels.INFO;
|
|
36
|
+
}
|
|
37
|
+
}
|
|
38
|
+
|
|
39
|
+
setLevel(level) {
|
|
40
|
+
if (this.levels[level] !== undefined) {
|
|
41
|
+
this.currentLevel = this.levels[level];
|
|
42
|
+
}
|
|
43
|
+
}
|
|
44
|
+
|
|
45
|
+
formatMessage(level, message, data = null) {
|
|
46
|
+
const timestamp = new Date().toISOString();
|
|
47
|
+
let output = `[${timestamp}] [${level}] ${message}`;
|
|
48
|
+
|
|
49
|
+
if (data) {
|
|
50
|
+
output += '\n' + JSON.stringify(data, null, 2);
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
return output;
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
shouldLog(level) {
|
|
57
|
+
const levelValue = this.levels[level];
|
|
58
|
+
|
|
59
|
+
// No logging if not configured
|
|
60
|
+
if (this.currentLevel === 99) return false;
|
|
61
|
+
|
|
62
|
+
// Custom logic based on user's requirements
|
|
63
|
+
if (this.currentLevel === this.levels.DEBUG) {
|
|
64
|
+
// Debug shows all levels
|
|
65
|
+
return true;
|
|
66
|
+
} else if (this.currentLevel === this.levels.INFO) {
|
|
67
|
+
// Info shows INFO and ERROR only
|
|
68
|
+
return level === 'INFO' || level === 'ERROR';
|
|
69
|
+
} else if (this.currentLevel === this.levels.WARN) {
|
|
70
|
+
// Warn shows WARN, INFO, and ERROR
|
|
71
|
+
return level === 'WARN' || level === 'INFO' || level === 'ERROR';
|
|
72
|
+
} else if (this.currentLevel === this.levels.ERROR) {
|
|
73
|
+
// Error shows ERROR only
|
|
74
|
+
return level === 'ERROR';
|
|
75
|
+
}
|
|
76
|
+
|
|
77
|
+
return false;
|
|
78
|
+
}
|
|
79
|
+
|
|
80
|
+
debug(message, data = null) {
|
|
81
|
+
if (this.shouldLog('DEBUG')) {
|
|
82
|
+
console.log(this.formatMessage('DEBUG', message, data));
|
|
83
|
+
}
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
info(message, data = null) {
|
|
87
|
+
if (this.shouldLog('INFO')) {
|
|
88
|
+
console.log(this.formatMessage('INFO', message, data));
|
|
89
|
+
}
|
|
90
|
+
}
|
|
91
|
+
|
|
92
|
+
warn(message, data = null) {
|
|
93
|
+
if (this.shouldLog('WARN')) {
|
|
94
|
+
console.warn(this.formatMessage('WARN', message, data));
|
|
95
|
+
}
|
|
96
|
+
}
|
|
97
|
+
|
|
98
|
+
error(message, data = null) {
|
|
99
|
+
if (this.shouldLog('ERROR')) {
|
|
100
|
+
console.error(this.formatMessage('ERROR', message, data));
|
|
101
|
+
}
|
|
102
|
+
}
|
|
103
|
+
|
|
104
|
+
fatal(message, data = null) {
|
|
105
|
+
if (this.shouldLog('FATAL')) {
|
|
106
|
+
console.error(this.formatMessage('FATAL', message, data));
|
|
107
|
+
}
|
|
108
|
+
}
|
|
109
|
+
}
|
|
110
|
+
|
|
111
|
+
// Create singleton logger instance
|
|
112
|
+
const logger = new Logger();
|
|
113
|
+
|
|
114
|
+
// Export for use in other modules
|
|
115
|
+
module.exports = logger;
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "llmjs2",
|
|
3
|
-
"version": "1.3.
|
|
3
|
+
"version": "1.3.3",
|
|
4
4
|
"description": "A unified Node.js library for connecting to multiple LLM providers: OpenAI, Ollama, and OpenRouter",
|
|
5
5
|
"main": "index.js",
|
|
6
6
|
"type": "commonjs",
|
|
@@ -8,6 +8,9 @@
|
|
|
8
8
|
"test": "node test.js",
|
|
9
9
|
"start": "node cli.js",
|
|
10
10
|
"server": "node cli.js",
|
|
11
|
+
"router:example": "node server-config.js",
|
|
12
|
+
"chat": "node chat-app.js",
|
|
13
|
+
"test:completion": "node test-completion.js",
|
|
11
14
|
"lint": "echo 'No linting configured'",
|
|
12
15
|
"typecheck": "echo 'No TypeScript configured'"
|
|
13
16
|
},
|
package/router.js
CHANGED
|
@@ -1,6 +1,7 @@
|
|
|
1
1
|
const OpenAIProvider = require('./providers/openai');
|
|
2
2
|
const OllamaProvider = require('./providers/ollama');
|
|
3
3
|
const OpenRouterProvider = require('./providers/openrouter');
|
|
4
|
+
const logger = require('./logger');
|
|
4
5
|
|
|
5
6
|
class Router {
|
|
6
7
|
constructor(modelList, strategy = 'default') {
|
|
@@ -180,7 +181,7 @@ class Router {
|
|
|
180
181
|
};
|
|
181
182
|
|
|
182
183
|
} catch (error) {
|
|
183
|
-
|
|
184
|
+
logger.error(`Router error`, { processId, error: error.message });
|
|
184
185
|
throw error;
|
|
185
186
|
}
|
|
186
187
|
}
|
|
@@ -200,8 +201,8 @@ class Router {
|
|
|
200
201
|
provider.apiKey = modelConfig.llm_params.api_key;
|
|
201
202
|
}
|
|
202
203
|
|
|
203
|
-
//
|
|
204
|
-
|
|
204
|
+
// Prepare the completion options
|
|
205
|
+
const completionOptions = {
|
|
205
206
|
model: actualModel,
|
|
206
207
|
temperature: input.temperature,
|
|
207
208
|
maxTokens: input.maxTokens,
|
|
@@ -211,7 +212,29 @@ class Router {
|
|
|
211
212
|
stop: input.stop,
|
|
212
213
|
tools: input.tools,
|
|
213
214
|
toolChoice: input.toolChoice
|
|
215
|
+
};
|
|
216
|
+
|
|
217
|
+
// Log request information
|
|
218
|
+
const apiKeyPreview = provider.apiKey ? provider.apiKey.substring(0, 10) + '...' : 'none';
|
|
219
|
+
logger.info('LLMJS2 š¤ Sending to LLM provider', {
|
|
220
|
+
source: 'router',
|
|
221
|
+
provider: providerName,
|
|
222
|
+
model: actualModel,
|
|
223
|
+
apiKey: apiKeyPreview,
|
|
224
|
+
messages: input.messages,
|
|
225
|
+
options: completionOptions
|
|
214
226
|
});
|
|
227
|
+
|
|
228
|
+
// Call the provider directly with just the actual model name (without provider prefix)
|
|
229
|
+
const result = await provider.createCompletion(input.messages, completionOptions);
|
|
230
|
+
|
|
231
|
+
// Log response information
|
|
232
|
+
logger.info('LLMJS2 š„ Received from LLM provider', {
|
|
233
|
+
source: 'router',
|
|
234
|
+
...result
|
|
235
|
+
});
|
|
236
|
+
|
|
237
|
+
return result;
|
|
215
238
|
}
|
|
216
239
|
|
|
217
240
|
generateProcessId() {
|
package/server.js
CHANGED
|
@@ -51,9 +51,12 @@ class Server {
|
|
|
51
51
|
return;
|
|
52
52
|
}
|
|
53
53
|
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
54
|
+
logger.debug('Incoming request', {
|
|
55
|
+
method: req.method,
|
|
56
|
+
url: req.url,
|
|
57
|
+
headers: req.headers,
|
|
58
|
+
bodyLength: JSON.stringify(body).length
|
|
59
|
+
});
|
|
57
60
|
|
|
58
61
|
// Process the completion request
|
|
59
62
|
const result = await this.processCompletion(body);
|
|
@@ -62,7 +65,7 @@ class Server {
|
|
|
62
65
|
this.sendSuccess(res, result);
|
|
63
66
|
|
|
64
67
|
} catch (error) {
|
|
65
|
-
|
|
68
|
+
logger.error('Server error', { error: error.message, url: req.url, method: req.method });
|
|
66
69
|
this.sendError(res, 500, 'Internal Server Error', error.message);
|
|
67
70
|
}
|
|
68
71
|
}
|
|
@@ -114,7 +117,10 @@ class Server {
|
|
|
114
117
|
throw new Error('No router configured. Use app.use(router) to add a router.');
|
|
115
118
|
}
|
|
116
119
|
|
|
117
|
-
|
|
120
|
+
logger.debug('Server processing completion request', {
|
|
121
|
+
model: body.model || 'auto-selected',
|
|
122
|
+
messageCount: body.messages?.length || 0
|
|
123
|
+
});
|
|
118
124
|
|
|
119
125
|
const routerResponse = await this.router.completion(body);
|
|
120
126
|
const { result, selectedModel, selectedModelName } = routerResponse;
|
|
@@ -168,6 +174,7 @@ class Server {
|
|
|
168
174
|
const server = http.createServer((req, res) => this.handleRequest(req, res));
|
|
169
175
|
|
|
170
176
|
server.listen(actualPort, actualHost, () => {
|
|
177
|
+
logger.info('Server started', { host: actualHost, port: actualPort });
|
|
171
178
|
console.log(`š llmjs2 server running on http://${actualHost}:${actualPort}`);
|
|
172
179
|
if (callback) callback();
|
|
173
180
|
});
|
|
@@ -0,0 +1,98 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Simple Completion Test
|
|
5
|
+
* Tests basic llmjs2 completion functionality using defaults
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
const { completion } = require('./index');
|
|
9
|
+
|
|
10
|
+
async function testSimpleCompletion() {
|
|
11
|
+
console.log('š§Ŗ Testing simple completion with defaults...\n');
|
|
12
|
+
|
|
13
|
+
try {
|
|
14
|
+
console.log('š¤ Sending completion request...');
|
|
15
|
+
console.log('Prompt: "Hello! Can you tell me a short joke?"\n');
|
|
16
|
+
|
|
17
|
+
// Simple completion call - no model or API key specified
|
|
18
|
+
const response = await completion('Hello! Can you tell me a short joke?');
|
|
19
|
+
|
|
20
|
+
console.log('✅ Completion successful!');
|
|
21
|
+
console.log('š„ Response received:');
|
|
22
|
+
console.log('---');
|
|
23
|
+
console.log(response);
|
|
24
|
+
console.log('---\n');
|
|
25
|
+
|
|
26
|
+
// Basic validation
|
|
27
|
+
if (typeof response === 'string' && response.length > 0) {
|
|
28
|
+
console.log('✅ Response is valid string');
|
|
29
|
+
console.log(`š Response length: ${response.length} characters`);
|
|
30
|
+
} else {
|
|
31
|
+
console.log('ā Response is not a valid string');
|
|
32
|
+
return false;
|
|
33
|
+
}
|
|
34
|
+
|
|
35
|
+
// Check if it looks like a joke or response
|
|
36
|
+
const hasJokeIndicators = response.toLowerCase().includes('joke') ||
|
|
37
|
+
response.toLowerCase().includes('knock knock') ||
|
|
38
|
+
response.includes('?') ||
|
|
39
|
+
response.includes('!');
|
|
40
|
+
|
|
41
|
+
if (hasJokeIndicators) {
|
|
42
|
+
console.log('✅ Response appears to be joke-related');
|
|
43
|
+
} else {
|
|
44
|
+
console.log('ā¹ļø Response received (may not be joke-related)');
|
|
45
|
+
}
|
|
46
|
+
|
|
47
|
+
console.log('\nš Simple completion test passed!');
|
|
48
|
+
return true;
|
|
49
|
+
|
|
50
|
+
} catch (error) {
|
|
51
|
+
console.error('ā Completion test failed:');
|
|
52
|
+
console.error('Error:', error.message);
|
|
53
|
+
|
|
54
|
+
if (error.message.includes('API key')) {
|
|
55
|
+
console.log('\nš” Tip: Make sure you have API keys set in environment variables:');
|
|
56
|
+
console.log(' export OLLAMA_API_KEY=your_key');
|
|
57
|
+
console.log(' export OPEN_ROUTER_API_KEY=your_key');
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
return false;
|
|
61
|
+
}
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
// Check environment before running
|
|
65
|
+
function checkEnvironment() {
|
|
66
|
+
const hasKeys = process.env.OLLAMA_API_KEY || process.env.OPEN_ROUTER_API_KEY;
|
|
67
|
+
|
|
68
|
+
if (!hasKeys) {
|
|
69
|
+
console.log('ā ļø No API keys found in environment variables.');
|
|
70
|
+
console.log(' Set at least one: OLLAMA_API_KEY or OPEN_ROUTER_API_KEY');
|
|
71
|
+
console.log(' The test will still run but may fail.\n');
|
|
72
|
+
} else {
|
|
73
|
+
console.log('✅ API keys found in environment\n');
|
|
74
|
+
}
|
|
75
|
+
}
|
|
76
|
+
|
|
77
|
+
// Run the test
|
|
78
|
+
async function main() {
|
|
79
|
+
console.log('š Running Simple Completion Test for llmjs2\n');
|
|
80
|
+
|
|
81
|
+
checkEnvironment();
|
|
82
|
+
|
|
83
|
+
const success = await testSimpleCompletion();
|
|
84
|
+
|
|
85
|
+
if (success) {
|
|
86
|
+
console.log('\n⨠Test completed successfully!');
|
|
87
|
+
process.exit(0);
|
|
88
|
+
} else {
|
|
89
|
+
console.log('\nš„ Test failed!');
|
|
90
|
+
process.exit(1);
|
|
91
|
+
}
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
if (require.main === module) {
|
|
95
|
+
main();
|
|
96
|
+
}
|
|
97
|
+
|
|
98
|
+
module.exports = { testSimpleCompletion };
|
package/CONFIG_README.md
DELETED
|
@@ -1,98 +0,0 @@
|
|
|
1
|
-
# llmjs2 Configuration Guide
|
|
2
|
-
|
|
3
|
-
This directory contains sample configuration files for testing the llmjs2 library.
|
|
4
|
-
|
|
5
|
-
## Files
|
|
6
|
-
|
|
7
|
-
- `config.yaml` - Comprehensive sample configuration with models, guardrails, and routing
|
|
8
|
-
- `.env` - Sample environment variables (add your API keys here)
|
|
9
|
-
- `validate-config.js` - Script to validate your configuration
|
|
10
|
-
|
|
11
|
-
## Quick Start
|
|
12
|
-
|
|
13
|
-
1. **Add your API keys** to `.env`:
|
|
14
|
-
```bash
|
|
15
|
-
# Uncomment and add your keys
|
|
16
|
-
OPENAI_API_KEY=your_actual_openai_key
|
|
17
|
-
# OLLAMA_API_KEY and OPEN_ROUTER_API_KEY are already set for testing
|
|
18
|
-
```
|
|
19
|
-
|
|
20
|
-
2. **Validate the configuration**:
|
|
21
|
-
```bash
|
|
22
|
-
node validate-config.js
|
|
23
|
-
```
|
|
24
|
-
|
|
25
|
-
3. **Start the server**:
|
|
26
|
-
```bash
|
|
27
|
-
node cli.js --config config.yaml --port 3001
|
|
28
|
-
```
|
|
29
|
-
|
|
30
|
-
4. **Test the API**:
|
|
31
|
-
```bash
|
|
32
|
-
curl -X POST http://localhost:3001/v1/chat/completions \
|
|
33
|
-
-H "Content-Type: application/json" \
|
|
34
|
-
-d '{"messages":[{"role":"user","content":"Hello!"}]}'
|
|
35
|
-
```
|
|
36
|
-
|
|
37
|
-
## Configuration Structure
|
|
38
|
-
|
|
39
|
-
### Model List
|
|
40
|
-
Defines available models and their providers:
|
|
41
|
-
- `model_name`: Alias for routing (can have multiple providers)
|
|
42
|
-
- `llm_params`: Provider-specific configuration
|
|
43
|
-
- `model`: Full model identifier in format `[provider]/[actual-model-name]` (e.g., `openai/gpt-4`, `ollama/minimax-m2.5:cloud`)
|
|
44
|
-
- `api_key`: API key (supports `os.environ/VAR_NAME` syntax)
|
|
45
|
-
- `api_base`: Optional custom API endpoint
|
|
46
|
-
|
|
47
|
-
**Important**: The `[provider]/` prefix is used internally for routing. When requests are sent to LLM providers, only the `[actual-model-name]` part is used.
|
|
48
|
-
|
|
49
|
-
### Guardrails
|
|
50
|
-
Custom processing logic for requests/responses:
|
|
51
|
-
- `name`: Unique identifier
|
|
52
|
-
- `mode`: `pre_call` (before LLM) or `post_call` (after LLM)
|
|
53
|
-
- `code`: JavaScript function as string
|
|
54
|
-
|
|
55
|
-
### Router Settings
|
|
56
|
-
- `routing_strategy`: `default`, `random`, or `sequential`
|
|
57
|
-
|
|
58
|
-
## Testing Different Features
|
|
59
|
-
|
|
60
|
-
### Load Balancing
|
|
61
|
-
The config includes multiple models with the same `model_name` (like `free-model`) to demonstrate load balancing.
|
|
62
|
-
|
|
63
|
-
### Guardrails
|
|
64
|
-
Try sending requests with inappropriate content to see filtering in action:
|
|
65
|
-
```bash
|
|
66
|
-
curl -X POST http://localhost:3001/v1/chat/completions \
|
|
67
|
-
-H "Content-Type: application/json" \
|
|
68
|
-
-d '{"messages":[{"role":"user","content":"This contains badword"}]}'
|
|
69
|
-
```
|
|
70
|
-
|
|
71
|
-
### Different Routing Strategies
|
|
72
|
-
Edit `router_settings.routing_strategy` to test:
|
|
73
|
-
- `random`: Random model selection
|
|
74
|
-
- `sequential`: Cycle through models
|
|
75
|
-
- `default`: Load balance same model names
|
|
76
|
-
|
|
77
|
-
## Environment Variables
|
|
78
|
-
|
|
79
|
-
The configuration supports environment variables:
|
|
80
|
-
- `os.environ/VAR_NAME` syntax in YAML
|
|
81
|
-
- `.env` file loaded automatically
|
|
82
|
-
- Standard environment variables override defaults
|
|
83
|
-
|
|
84
|
-
## Troubleshooting
|
|
85
|
-
|
|
86
|
-
- **"Model not found"**: Check that API keys are set in `.env`
|
|
87
|
-
- **"Rate limit exceeded"**: The config includes rate limiting (10 requests/minute)
|
|
88
|
-
- **Server won't start**: Run `node validate-config.js` to check configuration
|
|
89
|
-
- **API errors**: Check server logs for detailed error messages
|
|
90
|
-
|
|
91
|
-
## Production Usage
|
|
92
|
-
|
|
93
|
-
For production:
|
|
94
|
-
1. Remove test API keys from `.env`
|
|
95
|
-
2. Add your actual API keys
|
|
96
|
-
3. Configure proper rate limiting and content filtering
|
|
97
|
-
4. Set up proper logging and monitoring
|
|
98
|
-
5. Use environment-specific config files (dev/prod)
|
package/validate-config.js
DELETED
|
@@ -1,87 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env node
|
|
2
|
-
|
|
3
|
-
// Test script to validate config.yaml loading
|
|
4
|
-
const yaml = require('yaml');
|
|
5
|
-
const fs = require('fs');
|
|
6
|
-
const path = require('path');
|
|
7
|
-
|
|
8
|
-
function loadAndValidateConfig() {
|
|
9
|
-
const configPath = path.join(__dirname, 'config.yaml');
|
|
10
|
-
|
|
11
|
-
if (!fs.existsSync(configPath)) {
|
|
12
|
-
console.error('ā config.yaml not found');
|
|
13
|
-
return false;
|
|
14
|
-
}
|
|
15
|
-
|
|
16
|
-
try {
|
|
17
|
-
const configContent = fs.readFileSync(configPath, 'utf8');
|
|
18
|
-
const config = yaml.parse(configContent);
|
|
19
|
-
|
|
20
|
-
console.log('✅ Config file loaded successfully');
|
|
21
|
-
console.log(`š Found ${config.model_list?.length || 0} models`);
|
|
22
|
-
console.log(`š”ļø Found ${config.guardrails?.length || 0} guardrails`);
|
|
23
|
-
console.log(`š Routing strategy: ${config.router_settings?.routing_strategy || 'default'}`);
|
|
24
|
-
|
|
25
|
-
// Validate model list
|
|
26
|
-
if (!config.model_list || !Array.isArray(config.model_list)) {
|
|
27
|
-
console.error('ā model_list must be an array');
|
|
28
|
-
return false;
|
|
29
|
-
}
|
|
30
|
-
|
|
31
|
-
// Check each model
|
|
32
|
-
const modelNames = [];
|
|
33
|
-
for (const model of config.model_list) {
|
|
34
|
-
if (!model.model_name || !model.llm_params?.model) {
|
|
35
|
-
console.error('ā Each model must have model_name and llm_params.model');
|
|
36
|
-
return false;
|
|
37
|
-
}
|
|
38
|
-
modelNames.push(model.model_name);
|
|
39
|
-
}
|
|
40
|
-
|
|
41
|
-
console.log(`š Model names: ${[...new Set(modelNames)].join(', ')}`);
|
|
42
|
-
|
|
43
|
-
// Validate guardrails
|
|
44
|
-
if (config.guardrails) {
|
|
45
|
-
for (const guardrail of config.guardrails) {
|
|
46
|
-
if (!guardrail.name || !guardrail.mode || !guardrail.code) {
|
|
47
|
-
console.error(`ā Guardrail "${guardrail.name || 'unnamed'}" missing required fields`);
|
|
48
|
-
return false;
|
|
49
|
-
}
|
|
50
|
-
if (!['pre_call', 'post_call'].includes(guardrail.mode)) {
|
|
51
|
-
console.error(`ā Guardrail "${guardrail.name}" has invalid mode: ${guardrail.mode}`);
|
|
52
|
-
return false;
|
|
53
|
-
}
|
|
54
|
-
}
|
|
55
|
-
}
|
|
56
|
-
|
|
57
|
-
// Test environment variable resolution
|
|
58
|
-
const testModel = config.model_list[0];
|
|
59
|
-
if (testModel.llm_params.api_key?.startsWith('os.environ/')) {
|
|
60
|
-
const envVar = testModel.llm_params.api_key.replace('os.environ/', '');
|
|
61
|
-
const envValue = process.env[envVar];
|
|
62
|
-
console.log(`š API key for ${testModel.llm_params.model}: ${envValue ? '✅ Set' : '❌ Not set'}`);
|
|
63
|
-
}
|
|
64
|
-
|
|
65
|
-
console.log('š Config validation passed!');
|
|
66
|
-
return true;
|
|
67
|
-
|
|
68
|
-
} catch (error) {
|
|
69
|
-
console.error('ā Config validation failed:', error.message);
|
|
70
|
-
return false;
|
|
71
|
-
}
|
|
72
|
-
}
|
|
73
|
-
|
|
74
|
-
// Run validation
|
|
75
|
-
console.log('š Validating config.yaml...\n');
|
|
76
|
-
const success = loadAndValidateConfig();
|
|
77
|
-
|
|
78
|
-
if (!success) {
|
|
79
|
-
process.exit(1);
|
|
80
|
-
}
|
|
81
|
-
|
|
82
|
-
console.log('\nš” To test the server, run:');
|
|
83
|
-
console.log(' node cli.js --config config.yaml --port 3001');
|
|
84
|
-
console.log('\nš To test with curl:');
|
|
85
|
-
console.log(' curl -X POST http://localhost:3001/v1/chat/completions \\');
|
|
86
|
-
console.log(' -H "Content-Type: application/json" \\');
|
|
87
|
-
console.log(' -d \'{"messages":[{"role":"user","content":"Hello!"}]}\'');
|