llmjs2 1.3.8 → 1.6.1

Files changed (49)
  1. package/README.md +31 -476
  2. package/chain/AGENT_STEP_README.md +102 -0
  3. package/chain/README.md +257 -0
  4. package/chain/WORKFLOW_README.md +85 -0
  5. package/chain/agent-step-example.js +232 -0
  6. package/chain/docs/AGENT.md +126 -0
  7. package/chain/docs/GRAPH.md +490 -0
  8. package/chain/examples.js +314 -0
  9. package/chain/index.js +31 -0
  10. package/chain/lib/agent.js +338 -0
  11. package/chain/lib/flow/agent-step.js +119 -0
  12. package/chain/lib/flow/edge.js +24 -0
  13. package/chain/lib/flow/flow.js +76 -0
  14. package/chain/lib/flow/graph.js +331 -0
  15. package/chain/lib/flow/index.js +7 -0
  16. package/chain/lib/flow/step.js +63 -0
  17. package/chain/lib/memory/in-memory.js +117 -0
  18. package/chain/lib/memory/index.js +36 -0
  19. package/chain/lib/memory/lance-memory.js +225 -0
  20. package/chain/lib/memory/sqlite-memory.js +309 -0
  21. package/chain/simple-agent-step-example.js +168 -0
  22. package/chain/workflow-example-usage.js +70 -0
  23. package/chain/workflow-example.json +59 -0
  24. package/core/README.md +485 -0
  25. package/core/cli.js +275 -0
  26. package/core/docs/BASIC_USAGE.md +62 -0
  27. package/core/docs/CLI.md +104 -0
  28. package/{docs → core/docs}/GET_STARTED.md +129 -129
  29. package/{docs → core/docs}/GUARDRAILS_GUIDE.md +734 -734
  30. package/{docs → core/docs}/README.md +47 -47
  31. package/core/docs/ROUTER_GUIDE.md +199 -0
  32. package/{docs → core/docs}/SERVER_MODE.md +358 -350
  33. package/core/index.js +115 -0
  34. package/{providers → core/providers}/ollama.js +14 -6
  35. package/{providers → core/providers}/openai.js +14 -6
  36. package/{providers → core/providers}/openrouter.js +14 -6
  37. package/core/router.js +252 -0
  38. package/{server.js → core/server.js} +15 -5
  39. package/package.json +43 -27
  40. package/cli.js +0 -195
  41. package/docs/BASIC_USAGE.md +0 -296
  42. package/docs/CLI.md +0 -455
  43. package/docs/ROUTER_GUIDE.md +0 -402
  44. package/index.js +0 -265
  45. package/router.js +0 -273
  46. package/test-completion.js +0 -99
  47. package/test.js +0 -246
  48. package/{config.yaml → core/config.yaml} +0 -0
  49. package/{logger.js → core/logger.js} +0 -0
package/core/cli.js ADDED
@@ -0,0 +1,275 @@
+ #!/usr/bin/env node
+
+ const { app } = require('./server');
+ const { router } = require('./router');
+ const { Graph } = require('../chain');
+ const yaml = require('yaml');
+ const fs = require('fs');
+ const path = require('path');
+
+ class CLI {
+   constructor() {
+     this.args = process.argv.slice(2);
+     this.options = this.parseArgs();
+   }
+
+   parseArgs() {
+     const options = {
+       mode: 'start',
+       start: false,
+       port: 3000,
+       host: 'localhost',
+       config: null,
+       flowPath: null,
+       inputStr: '',
+       help: false
+     };
+
+     if (this.args.length > 0 && this.args[0] === 'run') {
+       options.mode = 'run';
+       if (this.args.length < 2) {
+         console.error('Missing flow file for run command');
+         this.showHelp();
+         process.exit(1);
+       }
+       options.flowPath = this.args[1];
+       options.inputStr = this.args.slice(2).join(' ');
+       return options;
+     }
+
+     for (let i = 0; i < this.args.length; i++) {
+       const arg = this.args[i];
+
+       switch (arg) {
+         case '-S':
+           options.start = true;
+           break;
+         case '-p':
+         case '--port':
+           options.port = parseInt(this.args[++i], 10);
+           break;
+         case '-H':
+         case '--host':
+           options.host = this.args[++i];
+           break;
+         case '-c':
+         case '--config':
+           options.config = this.args[++i];
+           break;
+         case '-h':
+         case '--help':
+           options.help = true;
+           break;
+         default:
+           if (arg.startsWith('-')) {
+             console.error(`Unknown option: ${arg}`);
+             this.showHelp();
+             process.exit(1);
+           } else {
+             console.error(`Unexpected positional argument: ${arg}`);
+             this.showHelp();
+             process.exit(1);
+           }
+       }
+     }
+
+     return options;
+   }
+
+   parseInputs(inputStr) {
+     const inputs = {};
+     if (!inputStr.trim()) return inputs;
+
+     const pairs = inputStr.split(';').map((s) => s.trim()).filter((s) => s);
+     for (const pair of pairs) {
+       const [key, value] = pair.split('=');
+       if (key && value) {
+         const cleanValue = value.replace(/^["']|["']$/g, '');
+         inputs[key.trim()] = cleanValue;
+       }
+     }
+     return inputs;
+   }
+
+   async runWorkflow() {
+     try {
+       const config = JSON.parse(fs.readFileSync(this.options.flowPath, 'utf8'));
+       const workflow = Graph.load(config).compile();
+       const inputs = this.parseInputs(this.options.inputStr);
+
+       console.log(`Running workflow: ${config.name || this.options.flowPath}`);
+       console.log(`Inputs: ${JSON.stringify(inputs, null, 2)}`);
+
+       const result = await workflow.run(inputs);
+
+       console.log('Workflow completed successfully');
+       console.log(JSON.stringify(result, null, 2));
+     } catch (error) {
+       console.error('Failed to run workflow:', error.message);
+       process.exit(1);
+     }
+   }
+
+   showHelp() {
+     console.log(`
+ llmjs2 - OpenAI-Compatible API Server and Workflow Runner
+
+ USAGE:
+   llmjs2 -S [options]          Start the API server
+   llmjs2 run <flow> [inputs]   Run a workflow from JSON file
+
+ DESCRIPTION:
+   Starts an OpenAI-compatible API server with intelligent routing and guardrails,
+   or runs workflows defined in JSON files with agent steps and conditional logic.
+
+ OPTIONS:
+   -S                    Start the server
+   -c, --config <file>   YAML config file with models, guardrails, and routing
+   -p, --port <port>     Port to listen on (default: 3000)
+   -H, --host <host>     Host to bind to (default: localhost)
+   -h, --help            Show this help message
+
+ EXAMPLES:
+   llmjs2 -S
+   llmjs2 -S --config core/config.yaml
+   llmjs2 -S --port 8080 --host 0.0.0.0
+   llmjs2 run chain/workflow-example.json
+   llmjs2 run chain/workflow-example.json 'input=hello;reference=world'
+ `);
+   }
+
+   loadConfig() {
+     if (!this.options.config) {
+       return this.createDefaultConfig();
+     }
+
+     const configPath = path.resolve(this.options.config);
+     if (!fs.existsSync(configPath)) {
+       throw new Error(`Configuration file not found: ${configPath}`);
+     }
+
+     const configContent = fs.readFileSync(configPath, 'utf8');
+     const config = yaml.parse(configContent);
+     return this.resolveEnvVars(config);
+   }
+
+   createDefaultConfig() {
+     return {
+       model_list: [
+         {
+           model_name: 'default',
+           llm_params: {
+             model: process.env.OLLAMA_DEFAULT_MODEL || 'ollama/minimax-m2.5:cloud',
+             api_key: process.env.OLLAMA_API_KEY,
+             api_base: process.env.OLLAMA_BASE_URL
+           }
+         }
+       ],
+       guardrails: [],
+       router_settings: {
+         routing_strategy: 'default',
+         allow_unsafe_guardrails: false
+       }
+     };
+   }
+
+   resolveEnvVars(obj) {
+     if (typeof obj === 'string') {
+       if (obj.startsWith('os.environ/')) {
+         const envVar = obj.replace('os.environ/', '');
+         return process.env[envVar] || obj;
+       }
+       return obj;
+     }
+
+     if (Array.isArray(obj)) {
+       return obj.map((item) => this.resolveEnvVars(item));
+     }
+
+     if (obj && typeof obj === 'object') {
+       const resolved = {};
+       for (const [key, value] of Object.entries(obj)) {
+         resolved[key] = this.resolveEnvVars(value);
+       }
+       return resolved;
+     }
+
+     return obj;
+   }
+
+   createRouter(config) {
+     const settings = config.router_settings || {};
+     const route = router(config.model_list || [], settings.routing_strategy || 'default', {
+       allowUnsafeGuardrails: Boolean(settings.allow_unsafe_guardrails)
+     });
+
+     if (Array.isArray(config.guardrails) && config.guardrails.length > 0) {
+       route.setGuardrails(config.guardrails);
+     }
+
+     return route;
+   }
+
+   async run() {
+     if (this.options.help) {
+       this.showHelp();
+       return;
+     }
+
+     if (this.options.mode === 'run') {
+       await this.runWorkflow();
+       return;
+     }
+
+     if (!this.options.start && this.args.length === 0) {
+       this.options.start = true;
+     }
+
+     if (!this.options.start) {
+       this.showHelp();
+       return;
+     }
+
+     try {
+       const config = this.loadConfig();
+       const route = this.createRouter(config);
+
+       console.log('Starting llmjs2 server...');
+       console.log(`Configuration: ${this.options.config || 'default'}`);
+       console.log(`Models: ${route.getAvailableModels().join(', ')}`);
+       console.log(`Strategy: ${(config.router_settings || {}).routing_strategy || 'default'}`);
+       console.log(`Guardrails: ${(config.guardrails || []).length}`);
+
+       app.use(route);
+       app.listen(this.options.port, this.options.host);
+     } catch (error) {
+       console.error('Failed to start server:', error.message);
+       process.exit(1);
+     }
+   }
+ }
+
+ function loadEnvFile() {
+   const envPath = path.join(process.cwd(), '.env');
+   if (!fs.existsSync(envPath)) {
+     return;
+   }
+
+   const envContent = fs.readFileSync(envPath, 'utf8');
+   const envVars = envContent.split('\n').filter((line) => line.trim() && !line.startsWith('#'));
+
+   envVars.forEach((line) => {
+     const [key, ...rest] = line.split('=');
+     const value = rest.join('=');
+     if (key && value) {
+       process.env[key.trim()] = value.trim();
+     }
+   });
+ }
+
+ loadEnvFile();
+ const cli = new CLI();
+ cli.run().catch((error) => {
+   console.error(error.message || String(error));
+   process.exit(1);
+ });
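
The `run` subcommand above joins everything after the flow path into a single string and hands it to `parseInputs`, which splits on `;`, then on the first `=` of each pair, and strips surrounding quotes from values. A minimal standalone sketch of that behavior (mirroring `parseInputs` in `core/cli.js` above; note that the whole input string must be shell-quoted so the `;` reaches the CLI instead of terminating the command):

```javascript
// Sketch mirroring parseInputs from core/cli.js above.
function parseInputs(inputStr) {
  const inputs = {};
  if (!inputStr.trim()) return inputs;
  for (const pair of inputStr.split(';').map((s) => s.trim()).filter(Boolean)) {
    const [key, value] = pair.split('='); // a value containing '=' is truncated here
    if (key && value) inputs[key.trim()] = value.replace(/^["']|["']$/g, '');
  }
  return inputs;
}

// llmjs2 run flow.json 'input=hello;reference=world'
console.log(parseInputs('input=hello;reference=world'));
// -> { input: 'hello', reference: 'world' }
```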
package/core/docs/BASIC_USAGE.md ADDED
@@ -0,0 +1,62 @@
+ # Basic Usage Guide
+
+ This guide keeps llmjs2 usage minimal, covering just two completion patterns.
+
+ ## Pattern 1: Simple Prompt
+
+ ```javascript
+ import { completion } from 'llmjs2';
+
+ const response = await completion('Explain quantum physics in simple terms');
+ console.log(response);
+ ```
+
+ Behavior:
+ - Reads configured providers from environment variables or constructor config.
+ - Picks one available provider at random for each request.
+ - Uses that provider's default model.
+ - Returns the assistant text directly.
+
+ ## Pattern 2: Full Object API
+
+ ```javascript
+ import { completion } from 'llmjs2';
+
+ const response = await completion({
+   model: 'ollama/minimax-m2.5:cloud',
+   host: 'https://ollama.com/api/chat', // optional: overrides the env var or provider default
+   messages: [
+     { role: 'system', content: 'You are a helpful coding assistant.' },
+     { role: 'user', content: 'Write a function to reverse a string in JavaScript.' }
+   ],
+   apiKey: 'your-api-key', // optional: uses env vars if not provided
+   tools: [] // optional: function-calling tools
+ });
+
+ console.log(response);
+ ```
+
+ Notes:
+ - `model` format must be `provider/model_name`.
+ - `host` is optional and overrides the provider base URL for this request.
+ - `apiKey` is optional and overrides the provider API key for this request.
+ - `tools` is optional for function calling.
+
+ ## Environment Variables
+
+ ```bash
+ # API Keys
+ OPENAI_API_KEY=...
+ OPEN_ROUTER_API_KEY=...
+ OLLAMA_API_KEY=...
+
+ # Default Models
+ OPENAI_DEFAULT_MODEL=gpt-3.5-turbo
+ OPEN_ROUTER_DEFAULT_MODEL=openrouter/free
+ OLLAMA_DEFAULT_MODEL=minimax-m2.5:cloud
+
+ # Optional Base URLs
+ OPENAI_BASE_URL=https://api.openai.com/v1
+ OPEN_ROUTER_BASE_URL=https://openrouter.ai/api/v1/chat/completions
+ OLLAMA_BASE_URL=https://ollama.com/api/chat
+ ```
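
The `tools` field in the guide above is shown empty, and the docs in this release do not spell out its schema. Assuming llmjs2 accepts the standard OpenAI function-calling tool shape (an assumption, not confirmed by this diff), a request with one tool might look like:

```javascript
import { completion } from 'llmjs2';

// Hypothetical example: the OpenAI function-calling tool shape is assumed here,
// since this diff does not document the exact schema llmjs2 expects.
const response = await completion({
  model: 'ollama/minimax-m2.5:cloud',
  messages: [{ role: 'user', content: 'What is the weather in Paris?' }],
  tools: [
    {
      type: 'function',
      function: {
        name: 'get_weather', // hypothetical tool name
        description: 'Look up the current weather for a city',
        parameters: {
          type: 'object',
          properties: { city: { type: 'string' } },
          required: ['city']
        }
      }
    }
  ]
});

console.log(response);
```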
package/core/docs/CLI.md ADDED
@@ -0,0 +1,104 @@
+ # CLI Guide
+
+ Use the llmjs2 CLI to run the OpenAI-style API server or execute JSON workflows.
+
+ ## Install
+
+ ```bash
+ npm install -g llmjs2
+ ```
+
+ ## Commands
+
+ ```bash
+ llmjs2                # Start server with defaults
+ llmjs2 -S             # Explicitly start server
+ llmjs2 -S --config core/config.yaml
+ llmjs2 -S --port 8080 --host 0.0.0.0
+ llmjs2 run chain/workflow-example.json
+ llmjs2 run chain/workflow-example.json 'input=hello;reference=world'
+ llmjs2 --help
+ ```
+
+ ## Start Server
+
+ The server listens on `POST /v1/chat/completions` and returns an OpenAI-style response body.
+
+ ```bash
+ llmjs2 -S --config core/config.yaml --port 3000
+ ```
+
+ ### Options
+
+ - `-S`: Start the API server.
+ - `-c, --config <file>`: YAML config file with models, guardrails, and routing.
+ - `-p, --port <port>`: Port to listen on (default: 3000).
+ - `-H, --host <host>`: Host to bind to (default: localhost).
+ - `-h, --help`: Show help.
+
+ ## Workflow Runner
+
+ Run a workflow JSON file from the chain subsystem.
+
+ ```bash
+ llmjs2 run chain/workflow-example.json
+ ```
+
+ Pass workflow inputs as key=value pairs separated by semicolons, quoting the string so the shell does not split on `;`:
+
+ ```bash
+ llmjs2 run chain/workflow-example.json 'input=hello;reference=world'
+ ```
+
+ ## Configuration File
+
+ Example config:
+
+ ```yaml
+ model_list:
+   - model_name: default
+     llm_params:
+       model: ollama/minimax-m2.5:cloud
+       api_key: os.environ/OLLAMA_API_KEY
+       api_base: os.environ/OLLAMA_BASE_URL
+
+ guardrails:
+   - name: request_logger
+     mode: pre_call
+     code: |
+       (processId, input) => {
+         console.log(`[${processId}] incoming messages=${input.messages.length}`);
+         return input;
+       }
+
+ router_settings:
+   routing_strategy: default
+   allow_unsafe_guardrails: false
+ ```
+
+ Notes:
+ - Guardrails defined as code strings are disabled by default.
+ - To enable them, set `allow_unsafe_guardrails: true` under `router_settings`, or set the `LLMJS2_ALLOW_UNSAFE_GUARDRAILS=true` environment variable.
+
+ ## Environment Variables
+
+ ```bash
+ OPENAI_API_KEY=...
+ OPEN_ROUTER_API_KEY=...
+ OLLAMA_API_KEY=...
+
+ OPENAI_DEFAULT_MODEL=gpt-4o-mini
+ OPEN_ROUTER_DEFAULT_MODEL=openrouter/free
+ OLLAMA_DEFAULT_MODEL=minimax-m2.5:cloud
+
+ OPENAI_BASE_URL=https://api.openai.com/v1
+ OPEN_ROUTER_BASE_URL=https://openrouter.ai/api/v1/chat/completions
+ OLLAMA_BASE_URL=https://ollama.com/api/chat
+
+ PORT=3000
+ HOST=localhost
+ ```
+
+ ## .env Support
+
+ If a `.env` file exists in the current directory, llmjs2 loads it automatically before running.
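
On the loader's behavior: `loadEnvFile` in `core/cli.js` above skips blank lines and lines starting with `#`, splits each remaining line on the first `=`, keeps any later `=` characters in the value, and trims whitespace. A small `.env` that fits those rules (all values are placeholders; `DATABASE_URL` is a hypothetical key added only to illustrate the `=` handling, not one llmjs2 reads):

```bash
# .env (loaded automatically from the working directory)
OLLAMA_API_KEY=your_ollama_api_key_here
OLLAMA_DEFAULT_MODEL=minimax-m2.5:cloud
PORT=8080
HOST=0.0.0.0
# Only the first '=' separates key from value, so values may contain '=':
DATABASE_URL=postgres://localhost/db?sslmode=require
```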
package/{docs → core/docs}/GET_STARTED.md RENAMED
@@ -1,129 +1,129 @@
  # Getting Started with llmjs2

  Welcome to llmjs2! This guide will get you up and running in just 5 minutes.

  ## Prerequisites

  Before you begin, make sure you have:

  - Node.js version 14.0.0 or higher
  - API keys for at least one LLM provider (Ollama or OpenRouter)

  ## Installation

  ### Option 1: Global Installation (Recommended)

  Install llmjs2 globally to use it from anywhere:

  ```bash
  npm install -g llmjs2
  ```

  ### Option 2: Local Installation

  Install in your project directory:

  ```bash
  npm install llmjs2
  ```

  ## Quick Setup

  ### Step 1: Set API Keys

  Set your API keys as environment variables:

  ```bash
  # For Ollama (get your key from https://ollama.com)
  export OLLAMA_API_KEY=your_ollama_api_key_here

  # For OpenRouter (get your key from https://openrouter.ai)
- export OPEN_ROUTER_API_KEY=your_openrouter_api_key_here
+ export OPEN_ROUTER_API_KEY=your_OPEN_ROUTER_API_KEY_here
  ```

  ### Step 2: Your First Completion

  Create a simple test script:

  ```javascript
  // hello-world.js
  import { completion } from 'llmjs2';

  async function helloWorld() {
    try {
      const response = await completion('Hello! Introduce yourself in one sentence.');
      console.log('Response:', response);
    } catch (error) {
      console.error('Error:', error.message);
    }
  }

  helloWorld();
  ```

  Run it:

  ```bash
  node hello-world.js
  ```

  **Expected Output:**

  ```
  Response: Hello! I'm an AI assistant powered by llmjs2, designed to help with various tasks and provide intelligent responses.
  ```

  ### Step 3: Test Different Models

  Try using specific models:

  ```javascript
  // test-models.js
  import { completion } from 'llmjs2';

  async function testModels() {
    try {
      // Test Ollama
      if (process.env.OLLAMA_API_KEY) {
        const ollamaResponse = await completion('ollama/minimax-m2.5:cloud', 'What is AI?');
        console.log('Ollama:', ollamaResponse);
      }

      // Test OpenRouter
      if (process.env.OPEN_ROUTER_API_KEY) {
        const openrouterResponse = await completion('openrouter/openrouter/free', 'What is machine learning?');
        console.log('OpenRouter:', openrouterResponse);
      }
    } catch (error) {
      console.error('Error:', error.message);
    }
  }

  testModels();
  ```

  ## What's Next?

  Now that you have llmjs2 working, you can:

  1. **[Learn Basic Usage](BASIC_USAGE.md)** - Explore different API patterns and configuration options
  2. **[Run as a Server](SERVER_MODE.md)** - Set up an OpenAI-compatible API server
  3. **[Use the CLI](CLI.md)** - Manage servers and configurations from the command line

  ## Supported Providers

  llmjs2 supports multiple LLM providers:

  - **Ollama** - Connect to Ollama's cloud API
  - **OpenRouter** - Access various models through OpenRouter

  ## Need Help?

  If you encounter any issues:

  1. Check that your API keys are valid and have credits
  2. Ensure you're using the correct model format: `provider/model_name`
  3. Try the examples in this guide first
  4. Check your internet connection

  For more detailed usage, see the other guides in this documentation.
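
Finally, the CLI guide in this release states that the server exposes `POST /v1/chat/completions` with an OpenAI-style response body. A smoke test against a locally started server might look like the following; this is a sketch assuming the standard OpenAI chat-completions request shape and the `default` model name from the built-in config, since the diff does not enumerate the exact fields the server accepts:

```bash
# Start the server in another terminal first:
#   llmjs2 -S --port 3000
curl -s http://localhost:3000/v1/chat/completions \
  -H 'Content-Type: application/json' \
  -d '{
    "model": "default",
    "messages": [{ "role": "user", "content": "Say hello in one sentence." }]
  }'
```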