llmjs2 1.3.2 โ†’ 1.3.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -392,6 +392,69 @@ Run the test suite:
392
392
  npm test
393
393
  ```
394
394
 
395
+ Test basic completion functionality:
396
+
397
+ ```bash
398
+ npm run test:completion
399
+ ```
400
+
401
+ ## Logging
402
+
403
+ Configure logging levels for LLM provider requests and responses:
404
+
405
+ ```bash
406
+ # Show all logs (DEBUG, WARN, INFO, ERROR)
407
+ LLMJS2_LOG=debug node your-script.js
408
+
409
+ # Show INFO and ERROR logs only
410
+ LLMJS2_LOG=info node your-script.js
411
+
412
+ # Show WARN, INFO, and ERROR logs
413
+ LLMJS2_LOG=warn node your-script.js
414
+
415
+ # Show ERROR logs only
416
+ LLMJS2_LOG=error node your-script.js
417
+
418
+ # Examples
419
+ LLMJS2_LOG=debug npm run chat
420
+ LLMJS2_LOG=info npm run test:completion
421
+ ```
422
+
423
+ ### Log Levels
424
+
425
+ - **DEBUG**: All logs including detailed request/response data
426
+ - **INFO**: LLM provider communication and important events
427
+ - **WARN**: Warnings and important notifications
428
+ - **ERROR**: Errors and failures
429
+
430
+ ### Log Format
431
+
432
+ ```
433
+ [TIMESTAMP] [LEVEL] MESSAGE
434
+ DATA_OBJECT (JSON formatted)
435
+ ```
436
+
437
+ ### Example Output
438
+
439
+ ```
440
+ [2026-04-13T10:14:58.123Z] [INFO] LLMJS2 ๐Ÿ“ค Sending to LLM provider
441
+ {
442
+ "source": "completion",
443
+ "provider": "ollama",
444
+ "model": "minimax-m2.5:cloud",
445
+ "apiKey": "2620e31dea...",
446
+ "messages": [{"role": "user", "content": "Hello!"}]
447
+ }
448
+
449
+ [2026-04-13T10:15:01.456Z] [INFO] LLMJS2 ๐Ÿ“ฅ Received from LLM provider
450
+ {
451
+ "source": "completion",
452
+ "content": "Hello! How can I help you?",
453
+ "role": "assistant",
454
+ "usage": {"prompt_eval_count": 10, "eval_count": 15}
455
+ }
456
+ ```
457
+
395
458
  ## License
396
459
 
397
460
  MIT
package/index.js CHANGED
@@ -3,6 +3,7 @@ const OllamaProvider = require('./providers/ollama');
3
3
  const OpenRouterProvider = require('./providers/openrouter');
4
4
  const { router } = require('./router');
5
5
  const { app } = require('./server');
6
+ const logger = require('./logger');
6
7
 
7
8
  class LLMJS2 {
8
9
  constructor(config = {}) {
@@ -166,8 +167,25 @@ class LLMJS2 {
166
167
  provider.timeout = options.timeout;
167
168
  }
168
169
 
170
+ // Log request information
171
+ const apiKeyPreview = provider.apiKey ? provider.apiKey.substring(0, 10) + '...' : 'none';
172
+ logger.info('LLMJS2 ๐Ÿ“ค Sending to LLM provider', {
173
+ source: 'completion',
174
+ provider: provider.constructor.name.replace('Provider', '').toLowerCase(),
175
+ model: finalModel,
176
+ apiKey: apiKeyPreview,
177
+ messages: messages,
178
+ options: { ...options, model: finalModel }
179
+ });
180
+
169
181
  const result = await provider.createCompletion(messages, { ...options, model: finalModel });
170
182
 
183
+ // Log response information
184
+ logger.info('LLMJS2 ๐Ÿ“ฅ Received from LLM provider', {
185
+ source: 'completion',
186
+ ...result
187
+ });
188
+
171
189
  return result.content;
172
190
 
173
191
  } catch (error) {
package/logger.js ADDED
@@ -0,0 +1,115 @@
1
+ #!/usr/bin/env node
2
+
3
+ /**
4
+ * Simple logging utility for llmjs2
5
+ * Provides structured logging with timestamps and levels
6
+ */
7
+
8
class Logger {
  constructor() {
    // Numeric ranks serve only as keys for the configured level; the actual
    // visibility rules live in shouldLog() and are intentionally NON-standard
    // (see README "Logging"): `info` shows INFO+ERROR only, while `warn`
    // shows WARN+INFO+ERROR. Do not replace with conventional severity
    // filtering without also updating the README.
    this.levels = {
      DEBUG: 0,
      INFO: 1,
      WARN: 2,
      ERROR: 3,
      FATAL: 4
    };

    // Initialize the active level from the LLMJS2_LOG environment variable.
    this.setLevelFromEnv();
  }

  /**
   * Read LLMJS2_LOG and set the current level.
   * Unset          -> logging disabled entirely.
   * Invalid value  -> defaults to INFO.
   */
  setLevelFromEnv() {
    const envLevel = process.env.LLMJS2_LOG;
    if (!envLevel) {
      // Sentinel higher than any real level: disables all logging.
      this.currentLevel = 99;
      return;
    }

    const upperEnvLevel = envLevel.toUpperCase();
    if (this.levels[upperEnvLevel] !== undefined) {
      this.currentLevel = this.levels[upperEnvLevel];
    } else {
      // Fall back to INFO on an unrecognized value.
      this.currentLevel = this.levels.INFO;
    }
  }

  /**
   * Programmatically set the level ('DEBUG'|'INFO'|'WARN'|'ERROR'|'FATAL').
   * Unknown names are silently ignored.
   */
  setLevel(level) {
    if (this.levels[level] !== undefined) {
      this.currentLevel = this.levels[level];
    }
  }

  /**
   * Format one log entry: "[ISO timestamp] [LEVEL] message", followed by a
   * pretty-printed JSON data object on subsequent lines when provided.
   */
  formatMessage(level, message, data = null) {
    const timestamp = new Date().toISOString();
    let output = `[${timestamp}] [${level}] ${message}`;

    // `!= null` rather than truthiness, so falsy-but-real payloads
    // (0, '', false) are still printed.
    if (data != null) {
      output += '\n' + JSON.stringify(data, null, 2);
    }

    return output;
  }

  /**
   * Decide whether a message at `level` is visible under the configured level.
   * Visibility table (configured level -> shown message levels):
   *   DEBUG -> everything
   *   INFO  -> INFO, ERROR, FATAL
   *   WARN  -> WARN, INFO, ERROR, FATAL
   *   ERROR -> ERROR, FATAL
   */
  shouldLog(level) {
    // No logging when LLMJS2_LOG is unset.
    if (this.currentLevel === 99) return false;

    // FATAL is at least as severe as ERROR, so it is visible at every
    // configured level. (Fix: previously FATAL messages were dropped
    // unless the level was DEBUG.)
    if (level === 'FATAL') return true;

    if (this.currentLevel === this.levels.DEBUG) {
      // Debug shows all levels.
      return true;
    } else if (this.currentLevel === this.levels.INFO) {
      // Info shows INFO and ERROR only (intentional, per README).
      return level === 'INFO' || level === 'ERROR';
    } else if (this.currentLevel === this.levels.WARN) {
      // Warn shows WARN, INFO, and ERROR (intentional, per README).
      return level === 'WARN' || level === 'INFO' || level === 'ERROR';
    } else if (this.currentLevel === this.levels.ERROR) {
      // Error shows ERROR only (plus FATAL, handled above).
      return level === 'ERROR';
    }

    return false;
  }

  // Detailed diagnostics; visible only at DEBUG.
  debug(message, data = null) {
    if (this.shouldLog('DEBUG')) {
      console.log(this.formatMessage('DEBUG', message, data));
    }
  }

  // Normal operational events (e.g. provider request/response logging).
  info(message, data = null) {
    if (this.shouldLog('INFO')) {
      console.log(this.formatMessage('INFO', message, data));
    }
  }

  // Warnings and notable-but-non-fatal conditions.
  warn(message, data = null) {
    if (this.shouldLog('WARN')) {
      console.warn(this.formatMessage('WARN', message, data));
    }
  }

  // Errors and failures; written to stderr.
  error(message, data = null) {
    if (this.shouldLog('ERROR')) {
      console.error(this.formatMessage('ERROR', message, data));
    }
  }

  // Highest severity; written to stderr and never filtered out while
  // logging is enabled.
  fatal(message, data = null) {
    if (this.shouldLog('FATAL')) {
      console.error(this.formatMessage('FATAL', message, data));
    }
  }
}
110
+
111
// Shared singleton: every module that requires './logger' receives the same
// instance, so LLMJS2_LOG is read exactly once at process startup.
module.exports = new Logger();
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "llmjs2",
3
- "version": "1.3.2",
3
+ "version": "1.3.3",
4
4
  "description": "A unified Node.js library for connecting to multiple LLM providers: OpenAI, Ollama, and OpenRouter",
5
5
  "main": "index.js",
6
6
  "type": "commonjs",
@@ -10,6 +10,7 @@
10
10
  "server": "node cli.js",
11
11
  "router:example": "node server-config.js",
12
12
  "chat": "node chat-app.js",
13
+ "test:completion": "node test-completion.js",
13
14
  "lint": "echo 'No linting configured'",
14
15
  "typecheck": "echo 'No TypeScript configured'"
15
16
  },
package/router.js CHANGED
@@ -1,6 +1,7 @@
1
1
  const OpenAIProvider = require('./providers/openai');
2
2
  const OllamaProvider = require('./providers/ollama');
3
3
  const OpenRouterProvider = require('./providers/openrouter');
4
+ const logger = require('./logger');
4
5
 
5
6
  class Router {
6
7
  constructor(modelList, strategy = 'default') {
@@ -180,7 +181,7 @@ class Router {
180
181
  };
181
182
 
182
183
  } catch (error) {
183
- console.error(`[${processId}] Router error:`, error.message);
184
+ logger.error(`Router error`, { processId, error: error.message });
184
185
  throw error;
185
186
  }
186
187
  }
@@ -200,8 +201,8 @@ class Router {
200
201
  provider.apiKey = modelConfig.llm_params.api_key;
201
202
  }
202
203
 
203
- // Call the provider directly with just the actual model name (without provider prefix)
204
- return await provider.createCompletion(input.messages, {
204
+ // Prepare the completion options
205
+ const completionOptions = {
205
206
  model: actualModel,
206
207
  temperature: input.temperature,
207
208
  maxTokens: input.maxTokens,
@@ -211,7 +212,29 @@ class Router {
211
212
  stop: input.stop,
212
213
  tools: input.tools,
213
214
  toolChoice: input.toolChoice
215
+ };
216
+
217
+ // Log request information
218
+ const apiKeyPreview = provider.apiKey ? provider.apiKey.substring(0, 10) + '...' : 'none';
219
+ logger.info('LLMJS2 ๐Ÿ“ค Sending to LLM provider', {
220
+ source: 'router',
221
+ provider: providerName,
222
+ model: actualModel,
223
+ apiKey: apiKeyPreview,
224
+ messages: input.messages,
225
+ options: completionOptions
214
226
  });
227
+
228
+ // Call the provider directly with just the actual model name (without provider prefix)
229
+ const result = await provider.createCompletion(input.messages, completionOptions);
230
+
231
+ // Log response information
232
+ logger.info('LLMJS2 ๐Ÿ“ฅ Received from LLM provider', {
233
+ source: 'router',
234
+ ...result
235
+ });
236
+
237
+ return result;
215
238
  }
216
239
 
217
240
  generateProcessId() {
package/server.js CHANGED
@@ -51,9 +51,12 @@ class Server {
51
51
  return;
52
52
  }
53
53
 
54
- console.log(`[${new Date().toISOString()}] ${req.method} ${req.url}`);
55
- console.log('Headers:', JSON.stringify(req.headers));
56
- console.log('Body parsing completed successfully');
54
+ logger.debug('Incoming request', {
55
+ method: req.method,
56
+ url: req.url,
57
+ headers: req.headers,
58
+ bodyLength: JSON.stringify(body).length
59
+ });
57
60
 
58
61
  // Process the completion request
59
62
  const result = await this.processCompletion(body);
@@ -62,7 +65,7 @@ class Server {
62
65
  this.sendSuccess(res, result);
63
66
 
64
67
  } catch (error) {
65
- console.error('Server error:', error);
68
+ logger.error('Server error', { error: error.message, url: req.url, method: req.method });
66
69
  this.sendError(res, 500, 'Internal Server Error', error.message);
67
70
  }
68
71
  }
@@ -114,7 +117,10 @@ class Server {
114
117
  throw new Error('No router configured. Use app.use(router) to add a router.');
115
118
  }
116
119
 
117
- console.log(`Starting completion with model: ${body.model || 'auto-selected'}`);
120
+ logger.debug('Server processing completion request', {
121
+ model: body.model || 'auto-selected',
122
+ messageCount: body.messages?.length || 0
123
+ });
118
124
 
119
125
  const routerResponse = await this.router.completion(body);
120
126
  const { result, selectedModel, selectedModelName } = routerResponse;
@@ -168,6 +174,7 @@ class Server {
168
174
  const server = http.createServer((req, res) => this.handleRequest(req, res));
169
175
 
170
176
  server.listen(actualPort, actualHost, () => {
177
+ logger.info('Server started', { host: actualHost, port: actualPort });
171
178
  console.log(`๐Ÿš€ llmjs2 server running on http://${actualHost}:${actualPort}`);
172
179
  if (callback) callback();
173
180
  });
@@ -0,0 +1,98 @@
1
+ #!/usr/bin/env node
2
+
3
+ /**
4
+ * Simple Completion Test
5
+ * Tests basic llmjs2 completion functionality using defaults
6
+ */
7
+
8
+ const { completion } = require('./index');
9
+
10
/**
 * Exercise the default completion() path end-to-end with a fixed prompt.
 * Progress is reported on the console; resolves true on success and false
 * on failure. Never rejects: provider errors are caught and reported.
 */
async function testSimpleCompletion() {
  console.log('🧪 Testing simple completion with defaults...\n');

  try {
    console.log('📤 Sending completion request...');
    console.log('Prompt: "Hello! Can you tell me a short joke?"\n');

    // No model or API key supplied: exercises the library's defaults.
    const reply = await completion('Hello! Can you tell me a short joke?');

    console.log('✅ Completion successful!');
    console.log('📥 Response received:');
    console.log('---');
    console.log(reply);
    console.log('---\n');

    // Guard: the public API is expected to yield a non-empty string.
    if (typeof reply !== 'string' || reply.length === 0) {
      console.log('❌ Response is not a valid string');
      return false;
    }
    console.log('✅ Response is valid string');
    console.log(`📏 Response length: ${reply.length} characters`);

    // Heuristic only — a perfectly good answer may lack these markers.
    const lowered = reply.toLowerCase();
    const looksLikeJoke =
      lowered.includes('joke') ||
      lowered.includes('knock knock') ||
      reply.includes('?') ||
      reply.includes('!');

    console.log(looksLikeJoke
      ? '✅ Response appears to be joke-related'
      : 'ℹ️ Response received (may not be joke-related)');

    console.log('\n🎉 Simple completion test passed!');
    return true;

  } catch (error) {
    console.error('❌ Completion test failed:');
    console.error('Error:', error.message);

    // Most common failure mode: missing provider credentials.
    if (error.message.includes('API key')) {
      console.log('\n💡 Tip: Make sure you have API keys set in environment variables:');
      console.log(' export OLLAMA_API_KEY=your_key');
      console.log(' export OPEN_ROUTER_API_KEY=your_key');
    }

    return false;
  }
}
63
+
64
+ // Check environment before running
65
/**
 * Report whether a provider API key is present in the environment.
 * Warns (but does not abort) when neither key is set.
 */
function checkEnvironment() {
  const hasKeys = process.env.OLLAMA_API_KEY || process.env.OPEN_ROUTER_API_KEY;

  if (hasKeys) {
    console.log('✅ API keys found in environment\n');
    return;
  }

  console.log('⚠️ No API keys found in environment variables.');
  console.log(' Set at least one: OLLAMA_API_KEY or OPEN_ROUTER_API_KEY');
  console.log(' The test will still run but may fail.\n');
}
76
+
77
+ // Run the test
78
/**
 * CLI entry point: print a banner, report on environment keys, run the
 * completion test, and exit 0 on success / 1 on failure.
 */
async function main() {
  console.log('🚀 Running Simple Completion Test for llmjs2\n');

  checkEnvironment();

  const passed = await testSimpleCompletion();

  // Guard clause: bail out with a non-zero status on failure.
  if (!passed) {
    console.log('\n💥 Test failed!');
    process.exit(1);
  }

  console.log('\n✨ Test completed successfully!');
  process.exit(0);
}
93
+
94
// Run as a script (`node test-completion.js`); also importable as a module.
if (require.main === module) {
  // Fix: main() was a floating promise. testSimpleCompletion() handles
  // expected failures, but an unexpected rejection (e.g. a throw inside
  // checkEnvironment) would previously surface as an unhandled rejection
  // instead of a clean non-zero exit.
  main().catch((err) => {
    console.error(err);
    process.exit(1);
  });
}

module.exports = { testSimpleCompletion };