llmjs2 1.3.8 → 1.6.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. package/README.md +31 -476
  2. package/chain/AGENT_STEP_README.md +102 -0
  3. package/chain/README.md +257 -0
  4. package/chain/WORKFLOW_README.md +85 -0
  5. package/chain/agent-step-example.js +232 -0
  6. package/chain/docs/AGENT.md +126 -0
  7. package/chain/docs/GRAPH.md +490 -0
  8. package/chain/examples.js +314 -0
  9. package/chain/index.js +31 -0
  10. package/chain/lib/agent.js +338 -0
  11. package/chain/lib/flow/agent-step.js +119 -0
  12. package/chain/lib/flow/edge.js +24 -0
  13. package/chain/lib/flow/flow.js +76 -0
  14. package/chain/lib/flow/graph.js +331 -0
  15. package/chain/lib/flow/index.js +7 -0
  16. package/chain/lib/flow/step.js +63 -0
  17. package/chain/lib/memory/in-memory.js +117 -0
  18. package/chain/lib/memory/index.js +36 -0
  19. package/chain/lib/memory/lance-memory.js +225 -0
  20. package/chain/lib/memory/sqlite-memory.js +309 -0
  21. package/chain/simple-agent-step-example.js +168 -0
  22. package/chain/workflow-example-usage.js +70 -0
  23. package/chain/workflow-example.json +59 -0
  24. package/core/README.md +485 -0
  25. package/core/cli.js +275 -0
  26. package/core/docs/BASIC_USAGE.md +62 -0
  27. package/core/docs/CLI.md +104 -0
  28. package/{docs → core/docs}/GET_STARTED.md +129 -129
  29. package/{docs → core/docs}/GUARDRAILS_GUIDE.md +734 -734
  30. package/{docs → core/docs}/README.md +47 -47
  31. package/core/docs/ROUTER_GUIDE.md +199 -0
  32. package/{docs → core/docs}/SERVER_MODE.md +358 -350
  33. package/core/index.js +115 -0
  34. package/{providers → core/providers}/ollama.js +14 -6
  35. package/{providers → core/providers}/openai.js +14 -6
  36. package/{providers → core/providers}/openrouter.js +14 -6
  37. package/core/router.js +252 -0
  38. package/{server.js → core/server.js} +15 -5
  39. package/package.json +43 -27
  40. package/cli.js +0 -195
  41. package/docs/BASIC_USAGE.md +0 -296
  42. package/docs/CLI.md +0 -455
  43. package/docs/ROUTER_GUIDE.md +0 -402
  44. package/index.js +0 -265
  45. package/router.js +0 -273
  46. package/test-completion.js +0 -99
  47. package/test.js +0 -246
  48. package/{config.yaml → core/config.yaml} +0 -0
  49. package/{logger.js → core/logger.js} +0 -0
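The restructuring above moves the root-level modules (cli.js, index.js, router.js, server.js, the providers, config.yaml, logger.js) under core/, and adds a new chain/ subsystem (agents, flow graphs, and memory backends). One practical consequence, sketched below under the assumption that a consumer deep-imported provider modules by file path rather than through the package entry point (the new entry point's exports are not visible in this listing), is that those paths change:

```js
// Hypothetical deep-import path change, mirroring renames 34–36 above.
// 1.3.8:  require('llmjs2/providers/openai')
// 1.6.1:  require('llmjs2/core/providers/openai')
// Imports that go through the package entry point itself are unaffected by the
// file moves, assuming package.json's "main" was updated along with them.
const OpenAIProvider = require('llmjs2/core/providers/openai');
```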
package/router.js DELETED
@@ -1,273 +0,0 @@
- const OpenAIProvider = require('./providers/openai');
- const OllamaProvider = require('./providers/ollama');
- const OpenRouterProvider = require('./providers/openrouter');
- const logger = require('./logger');
-
- class Router {
-   constructor(modelList, strategy = 'default') {
-     this.modelList = this.normalizeModelList(modelList);
-     this.strategy = strategy;
-     this.providers = {
-       openai: new OpenAIProvider(),
-       ollama: new OllamaProvider(),
-       openrouter: new OpenRouterProvider()
-     };
-     this.guardrails = [];
-     this.sequentialIndex = 0;
-
-     // Group models by model_name for load balancing
-     this.modelsByName = {};
-     this.modelList.forEach(model => {
-       if (!this.modelsByName[model.model_name]) {
-         this.modelsByName[model.model_name] = [];
-       }
-       this.modelsByName[model.model_name].push(model);
-     });
-   }
-
-   normalizeModelList(modelList) {
-     return modelList.map(model => ({
-       model_name: model.model_name,
-       llm_params: {
-         model: model.llm_params.model,
-         api_key: this.resolveApiKey(model.llm_params.api_key),
-         api_base: model.llm_params.api_base
-       }
-     }));
-   }
-
-   resolveApiKey(apiKey) {
-     if (typeof apiKey === 'string' && apiKey.startsWith('os.environ/')) {
-       const envVar = apiKey.replace('os.environ/', '');
-       return process.env[envVar] || apiKey;
-     }
-     return apiKey;
-   }
-
-   setGuardrails(guardrails) {
-     this.guardrails = guardrails || [];
-   }
-
-   selectModel(modelName) {
-     if (!modelName) {
-       // Auto-routing based on strategy
-       return this.autoSelectModel();
-     }
-
-     // Direct model selection with load balancing
-     const availableModels = this.modelsByName[modelName];
-     if (!availableModels || availableModels.length === 0) {
-       throw new Error(`Model '${modelName}' not found in router configuration`);
-     }
-
-     if (availableModels.length === 1) {
-       return availableModels[0];
-     }
-
-     // Load balancing for multiple models with same name
-     const randomIndex = Math.floor(Math.random() * availableModels.length);
-     return availableModels[randomIndex];
-   }
-
-   autoSelectModel() {
-     const allModels = this.modelList;
-
-     switch (this.strategy) {
-       case 'random':
-         const randomIndex = Math.floor(Math.random() * allModels.length);
-         return allModels[randomIndex];
-
-       case 'sequential':
-         const model = allModels[this.sequentialIndex];
-         this.sequentialIndex = (this.sequentialIndex + 1) % allModels.length;
-         return model;
-
-       case 'default':
-       default:
-         // Use sequential selection for auto-routing
-         const selectedModel = allModels[this.sequentialIndex];
-         this.sequentialIndex = (this.sequentialIndex + 1) % allModels.length;
-         return selectedModel;
-     }
-   }
-
-   async applyPreGuardrails(processId, input) {
-     let currentInput = input;
-
-     for (const guardrail of this.guardrails) {
-       if (guardrail.mode === 'pre_call') {
-         try {
-           const result = await this.executeGuardrail(guardrail, processId, currentInput);
-           if (result === null || result === undefined) {
-             throw new Error(`Guardrail '${guardrail.name}' returned null/undefined`);
-           }
-           currentInput = result;
-         } catch (error) {
-           throw new Error(`Pre-call guardrail '${guardrail.name}' failed: ${error.message}`);
-         }
-       }
-     }
-
-     return currentInput;
-   }
-
-   async applyPostGuardrails(processId, result) {
-     let currentResult = result;
-
-     for (const guardrail of this.guardrails) {
-       if (guardrail.mode === 'post_call') {
-         try {
-           const processed = await this.executeGuardrail(guardrail, processId, currentResult);
-           if (processed === null || processed === undefined) {
-             throw new Error(`Guardrail '${guardrail.name}' returned null/undefined`);
-           }
-           currentResult = processed;
-         } catch (error) {
-           throw new Error(`Post-call guardrail '${guardrail.name}' failed: ${error.message}`);
-         }
-       }
-     }
-
-     return currentResult;
-   }
-
-   async executeGuardrail(guardrail, processId, data) {
-     if (typeof guardrail.code === 'string') {
-       // Execute string code as function
-       const func = new Function('processId', 'data', `return (${guardrail.code})(processId, data)`);
-       return await func(processId, data);
-     } else if (typeof guardrail.code === 'function') {
-       return await guardrail.code(processId, data);
-     } else {
-       throw new Error(`Invalid guardrail code for '${guardrail.name}'`);
-     }
-   }
-
-   async completion(options) {
-     const processId = this.generateProcessId();
-     let selectedModel;
-
-     try {
-       // Select model
-       selectedModel = this.selectModel(options.model);
-       console.log(`[${processId}] Selected model: ${selectedModel.llm_params.model}`);
-
-       // Prepare input for guardrails
-       const input = {
-         model: selectedModel.llm_params.model,
-         messages: options.messages || [],
-         temperature: options.temperature,
-         maxTokens: options.max_tokens || options.maxTokens,
-         topP: options.top_p || options.topP,
-         frequencyPenalty: options.frequency_penalty || options.frequencyPenalty,
-         presencePenalty: options.presence_penalty || options.presencePenalty,
-         stop: options.stop,
-         tools: options.tools,
-         toolChoice: options.tool_choice || options.toolChoice
-       };
-
-       // Apply pre-call guardrails
-       const processedInput = await this.applyPreGuardrails(processId, input);
-
-       // Create completion using selected model
-       const result = await this.callProvider(selectedModel, processedInput);
-
-       // Apply post-call guardrails
-       const finalResult = await this.applyPostGuardrails(processId, result);
-
-       // Return result with selected model information
-       return {
-         result: finalResult,
-         selectedModel: selectedModel.llm_params.model, // Full model name with provider prefix
-         selectedModelName: selectedModel.model_name // User-friendly model name
-       };
-
-     } catch (error) {
-       logger.error(`Router error`, { processId, error: error.message });
-       throw error;
-     }
-   }
-
-   async callProvider(modelConfig, input) {
-     const firstSlashIndex = modelConfig.llm_params.model.indexOf('/');
-     const providerName = firstSlashIndex !== -1 ? modelConfig.llm_params.model.substring(0, firstSlashIndex) : '';
-     const actualModel = firstSlashIndex !== -1 ? modelConfig.llm_params.model.substring(firstSlashIndex + 1) : modelConfig.llm_params.model;
-     const provider = this.providers[providerName];
-
-     if (!provider) {
-       throw new Error(`Unknown provider: ${providerName}`);
-     }
-
-     // Set the API key for this request (only if provided, otherwise use provider's default)
-     if (modelConfig.llm_params.api_key !== undefined) {
-       provider.apiKey = modelConfig.llm_params.api_key;
-     }
-
-     // Prepare the completion options
-     const completionOptions = {
-       model: actualModel,
-       temperature: input.temperature,
-       maxTokens: input.maxTokens,
-       topP: input.topP,
-       frequencyPenalty: input.frequencyPenalty,
-       presencePenalty: input.presencePenalty,
-       stop: input.stop,
-       tools: input.tools,
-       toolChoice: input.toolChoice
-     };
-
-     // Log request information
-     const apiKeyPreview = provider.apiKey ? provider.apiKey.substring(0, 10) + '...' : 'none';
-     logger.info('LLMJS2 📤 Sending to LLM provider', {
-       source: 'router',
-       provider: providerName,
-       model: actualModel,
-       apiKey: apiKeyPreview,
-       messages: input.messages,
-       options: completionOptions
-     });
-
-     // Call the provider directly with just the actual model name (without provider prefix)
-     const result = await provider.createCompletion(input.messages, completionOptions);
-
-     // Log response information
-     logger.info('LLMJS2 📥 Received from LLM provider', {
-       source: 'router',
-       ...result
-     });
-
-     return result;
-   }
-
-   generateProcessId() {
-     return `req_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
-   }
-
-   // Method to add guardrails dynamically
-   addGuardrail(guardrail) {
-     this.guardrails.push(guardrail);
-   }
-
-   // Method to get available models
-   getAvailableModels() {
-     return Object.keys(this.modelsByName);
-   }
-
-   // Method to get model count for load balancing info
-   getModelStats() {
-     const stats = {};
-     Object.keys(this.modelsByName).forEach(modelName => {
-       stats[modelName] = this.modelsByName[modelName].length;
-     });
-     return stats;
-   }
- }
-
- function router(modelList, strategy = 'default') {
-   return new Router(modelList, strategy);
- }
-
- module.exports = {
-   Router,
-   router
- };
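For context, here is a minimal usage sketch of the API this deleted router.js exposed, reconstructed from the code above (in 1.6.1 an expanded core/router.js takes its place). The model names, API-key environment variables, and the guardrail body are illustrative only:

```js
const { router } = require('llmjs2'); // the pre-1.6.1 index.js exported router()

// Two deployments registered under the same model_name are load-balanced
// (random pick) whenever that name is requested; the 'sequential' strategy
// only governs auto-selection when completion() is called without a model.
const route = router([
  { model_name: 'chat', llm_params: { model: 'openai/gpt-3.5-turbo', api_key: 'os.environ/OPENAI_API_KEY' } },
  { model_name: 'chat', llm_params: { model: 'openrouter/openai/gpt-3.5-turbo', api_key: 'os.environ/OPEN_ROUTER_API_KEY' } }
], 'sequential');

// A pre_call guardrail receives the prepared input and must return it (possibly modified).
route.setGuardrails([
  {
    name: 'add_system_prompt',
    mode: 'pre_call',
    code: (processId, input) => ({
      ...input,
      messages: [{ role: 'system', content: 'Be concise.' }, ...input.messages]
    })
  }
]);

(async () => {
  const { result, selectedModel, selectedModelName } = await route.completion({
    model: 'chat',
    messages: [{ role: 'user', content: 'Hello!' }]
  });
  console.log(selectedModelName, selectedModel, result);
})();
```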
package/test-completion.js DELETED
@@ -1,99 +0,0 @@
- #!/usr/bin/env node
-
- /**
-  * Simple Completion Test
-  * Tests basic llmjs2 completion functionality using defaults
-  */
-
- const { completion } = require('./index');
-
- async function testSimpleCompletion() {
-   console.log('🧪 Testing simple completion with defaults...\n');
-
-   try {
-     console.log('📤 Sending completion request...');
-     console.log('Prompt: "Hello! Can you tell me a short joke?"');
-     console.log('Note: No model specified - will auto-select provider based on available API keys\n');
-
-     // Simple completion call - no model or API key specified
-     const response = await completion('Hello! Can you tell me a short joke?');
-
-     console.log('✅ Completion successful!');
-     console.log('📥 Response received:');
-     console.log('---');
-     console.log(response);
-     console.log('---\n');
-
-     // Basic validation
-     if (typeof response === 'string' && response.length > 0) {
-       console.log('✅ Response is valid string');
-       console.log(`📏 Response length: ${response.length} characters`);
-     } else {
-       console.log('❌ Response is not a valid string');
-       return false;
-     }
-
-     // Check if it looks like a joke or response
-     const hasJokeIndicators = response.toLowerCase().includes('joke') ||
-                               response.toLowerCase().includes('knock knock') ||
-                               response.includes('?') ||
-                               response.includes('!');
-
-     if (hasJokeIndicators) {
-       console.log('✅ Response appears to be joke-related');
-     } else {
-       console.log('ℹ️ Response received (may not be joke-related)');
-     }
-
-     console.log('\n🎉 Simple completion test passed!');
-     return true;
-
-   } catch (error) {
-     console.error('❌ Completion test failed:');
-     console.error('Error:', error.message);
-
-     if (error.message.includes('API key')) {
-       console.log('\n💡 Tip: Make sure you have API keys set in environment variables:');
-       console.log(' export OLLAMA_API_KEY=your_key');
-       console.log(' export OPEN_ROUTER_API_KEY=your_key');
-     }
-
-     return false;
-   }
- }
-
- // Check environment before running
- function checkEnvironment() {
-   const hasKeys = process.env.OLLAMA_API_KEY || process.env.OPEN_ROUTER_API_KEY;
-
-   if (!hasKeys) {
-     console.log('⚠️ No API keys found in environment variables.');
-     console.log(' Set at least one: OLLAMA_API_KEY or OPEN_ROUTER_API_KEY');
-     console.log(' The test will still run but may fail.\n');
-   } else {
-     console.log('✅ API keys found in environment\n');
-   }
- }
-
- // Run the test
- async function main() {
-   console.log('🚀 Running Simple Completion Test for llmjs2\n');
-
-   checkEnvironment();
-
-   const success = await testSimpleCompletion();
-
-   if (success) {
-     console.log('\n✨ Test completed successfully!');
-     process.exit(0);
-   } else {
-     console.log('\n💥 Test failed!');
-     process.exit(1);
-   }
- }
-
- if (require.main === module) {
-   main();
- }
-
- module.exports = { testSimpleCompletion };
package/test.js DELETED
@@ -1,246 +0,0 @@
- // Import all modules
- const { completion: llmCompletion, LLMJS2: LLMJS2Class, router, app } = require('./index');
-
- // Basic validation tests
- async function runTests() {
-   console.log('Running LLMJS2 comprehensive tests...\n');
-
-   let testCount = 0;
-   let passedCount = 0;
-
-   // Test 1: Input validation
-   testCount++;
-   try {
-     await llmCompletion('');
-     console.log('❌ Test 1 failed: Should reject empty prompt');
-   } catch (error) {
-     console.log('✅ Test 1 passed: Empty prompt rejected');
-     passedCount++;
-   }
-
-   // Test 2: Invalid input type
-   testCount++;
-   try {
-     await llmCompletion(123);
-     console.log('❌ Test 2 failed: Should reject non-string/non-object input');
-   } catch (error) {
-     console.log('✅ Test 2 passed: Invalid input type rejected');
-     passedCount++;
-   }
-
-   // Test 3: Invalid messages array
-   testCount++;
-   try {
-     await llmCompletion({ messages: [] });
-     console.log('❌ Test 3 failed: Should reject empty messages array');
-   } catch (error) {
-     console.log('✅ Test 3 passed: Empty messages array rejected');
-     passedCount++;
-   }
-
-   // Test 4: Invalid message format
-   testCount++;
-   try {
-     await llmCompletion({ messages: [{ content: 'test' }] });
-     console.log('❌ Test 4 failed: Should reject message without role');
-   } catch (error) {
-     console.log('✅ Test 4 passed: Message without role rejected');
-     passedCount++;
-   }
-
-   // Test 5: Invalid provider
-   testCount++;
-   try {
-     const llm = new LLMJS2();
-     llm.getProvider('unknown/test');
-     console.log('❌ Test 5 failed: Should reject unknown provider');
-   } catch (error) {
-     console.log('✅ Test 5 passed: Unknown provider rejected -', error.message);
-     passedCount++;
-   }
-
-   // Test 6: LLMJS2 class instantiation
-   testCount++;
-   try {
-     const llm = new LLMJS2Class();
-     console.log('✅ Test 6 passed: LLMJS2 class instantiated successfully');
-     passedCount++;
-   } catch (error) {
-     console.log('❌ Test 6 failed:', error.message);
-   }
-
-   // Test 7: Provider availability check (without API keys)
-   testCount++;
-   try {
-     // Temporarily clear API keys for this test
-     const originalKeys = {
-       openai: process.env.OPENAI_API_KEY,
-       ollama: process.env.OLLAMA_API_KEY,
-       openrouter: process.env.OPEN_ROUTER_API_KEY
-     };
-
-     delete process.env.OPENAI_API_KEY;
-     delete process.env.OLLAMA_API_KEY;
-     delete process.env.OPEN_ROUTER_API_KEY;
-
-     const llm = new LLMJS2Class();
-     const available = llm.getAvailableProviders();
-
-     // Restore original keys
-     process.env.OPENAI_API_KEY = originalKeys.openai;
-     process.env.OLLAMA_API_KEY = originalKeys.ollama;
-     process.env.OPEN_ROUTER_API_KEY = originalKeys.openrouter;
-
-     if (available.length === 0) {
-       console.log('✅ Test 7 passed: No providers available without API keys');
-       passedCount++;
-     } else {
-       console.log('❌ Test 7 failed: Should have no providers without API keys');
-     }
-   } catch (error) {
-     console.log('❌ Test 7 failed:', error.message);
-   }
-
-   // Test 8: Model parsing
-   testCount++;
-   try {
-     const llm = new LLMJS2Class();
-     const { provider, model } = llm.parseModel('openai/gpt-3.5-turbo');
-     if (provider === 'openai' && model === 'gpt-3.5-turbo') {
-       console.log('✅ Test 8 passed: Model parsing works correctly');
-       passedCount++;
-     } else {
-       console.log('❌ Test 8 failed: Model parsing incorrect');
-     }
-   } catch (error) {
-     console.log('❌ Test 8 failed:', error.message);
-   }
-
-   // Test 9: Router creation
-   testCount++;
-   try {
-     const modelList = [
-       {
-         model_name: 'test',
-         llm_params: {
-           model: 'ollama/test',
-           api_key: 'test-key'
-         }
-       }
-     ];
-     const route = router(modelList, 'random');
-     console.log('✅ Test 9 passed: Router created successfully');
-     passedCount++;
-   } catch (error) {
-     console.log('❌ Test 9 failed:', error.message);
-   }
-
-   // Test 10: Router model selection
-   testCount++;
-   try {
-     const modelList = [
-       {
-         model_name: 'test',
-         llm_params: {
-           model: 'ollama/test',
-           api_key: 'test-key'
-         }
-       }
-     ];
-     const route = router(modelList);
-     const selectedModel = route.selectModel('test');
-     if (selectedModel.llm_params.model === 'ollama/test') {
-       console.log('✅ Test 10 passed: Router model selection works');
-       passedCount++;
-     } else {
-       console.log('❌ Test 10 failed: Incorrect model selected');
-     }
-   } catch (error) {
-     console.log('❌ Test 10 failed:', error.message);
-   }
-
-   // Test 11: Router auto-selection
-   testCount++;
-   try {
-     const modelList = [
-       {
-         model_name: 'test1',
-         llm_params: {
-           model: 'ollama/test1',
-           api_key: 'test-key'
-         }
-       },
-       {
-         model_name: 'test2',
-         llm_params: {
-           model: 'ollama/test2',
-           api_key: 'test-key'
-         }
-       }
-     ];
-     const route = router(modelList, 'random');
-     const selectedModel = route.autoSelectModel();
-     if (selectedModel && selectedModel.llm_params.model.startsWith('ollama/')) {
-       console.log('✅ Test 11 passed: Router auto-selection works');
-       passedCount++;
-     } else {
-       console.log('❌ Test 11 failed: Auto-selection failed');
-     }
-   } catch (error) {
-     console.log('❌ Test 11 failed:', error.message);
-   }
-
-   // Test 12: Guardrails execution
-   testCount++;
-   try {
-     const modelList = [
-       {
-         model_name: 'test',
-         llm_params: {
-           model: 'ollama/test',
-           api_key: 'test-key'
-         }
-       }
-     ];
-     const route = router(modelList);
-
-     route.setGuardrails([
-       {
-         name: 'test_guardrail',
-         mode: 'pre_call',
-         code: (processId, input) => {
-           return { ...input, messages: [...input.messages, { role: 'system', content: 'Test' }] };
-         }
-       }
-     ]);
-
-     console.log('✅ Test 12 passed: Guardrails set successfully');
-     passedCount++;
-   } catch (error) {
-     console.log('❌ Test 12 failed:', error.message);
-   }
-
-   // Test 13: Server app creation
-   testCount++;
-   try {
-     const testApp = app; // Already created
-     if (testApp && typeof testApp.use === 'function') {
-       console.log('✅ Test 13 passed: Server app created successfully');
-       passedCount++;
-     } else {
-       console.log('❌ Test 13 failed: Server app creation failed');
-     }
-   } catch (error) {
-     console.log('❌ Test 13 failed:', error.message);
-   }
-
-   console.log(`\nTest Results: ${passedCount}/${testCount} tests passed`);
-   if (passedCount === testCount) {
-     console.log('🎉 All tests passed!');
-   } else {
-     console.log('⚠️ Some tests failed. Please review the implementation.');
-   }
- }
-
- // Run tests
- runTests().catch(console.error);