llmjs2 1.3.9 → 1.6.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. package/README.md +31 -476
  2. package/chain/AGENT_STEP_README.md +102 -0
  3. package/chain/README.md +257 -0
  4. package/chain/WORKFLOW_README.md +85 -0
  5. package/chain/agent-step-example.js +232 -0
  6. package/chain/docs/AGENT.md +126 -0
  7. package/chain/docs/GRAPH.md +490 -0
  8. package/chain/examples.js +314 -0
  9. package/chain/index.js +31 -0
  10. package/chain/lib/agent.js +338 -0
  11. package/chain/lib/flow/agent-step.js +119 -0
  12. package/chain/lib/flow/edge.js +24 -0
  13. package/chain/lib/flow/flow.js +76 -0
  14. package/chain/lib/flow/graph.js +331 -0
  15. package/chain/lib/flow/index.js +7 -0
  16. package/chain/lib/flow/step.js +63 -0
  17. package/chain/lib/memory/in-memory.js +117 -0
  18. package/chain/lib/memory/index.js +36 -0
  19. package/chain/lib/memory/lance-memory.js +225 -0
  20. package/chain/lib/memory/sqlite-memory.js +309 -0
  21. package/chain/simple-agent-step-example.js +168 -0
  22. package/chain/workflow-example-usage.js +70 -0
  23. package/chain/workflow-example.json +59 -0
  24. package/core/README.md +485 -0
  25. package/core/cli.js +275 -0
  26. package/core/docs/BASIC_USAGE.md +62 -0
  27. package/core/docs/CLI.md +104 -0
  28. package/{docs → core/docs}/GET_STARTED.md +129 -129
  29. package/{docs → core/docs}/GUARDRAILS_GUIDE.md +734 -734
  30. package/{docs → core/docs}/README.md +47 -47
  31. package/core/docs/ROUTER_GUIDE.md +199 -0
  32. package/{docs → core/docs}/SERVER_MODE.md +358 -350
  33. package/core/index.js +115 -0
  34. package/{providers → core/providers}/ollama.js +14 -6
  35. package/{providers → core/providers}/openai.js +14 -6
  36. package/{providers → core/providers}/openrouter.js +14 -6
  37. package/core/router.js +252 -0
  38. package/{server.js → core/server.js} +15 -5
  39. package/package.json +43 -27
  40. package/cli.js +0 -195
  41. package/docs/BASIC_USAGE.md +0 -296
  42. package/docs/CLI.md +0 -455
  43. package/docs/ROUTER_GUIDE.md +0 -402
  44. package/index.js +0 -267
  45. package/router.js +0 -273
  46. package/test-completion.js +0 -99
  47. package/test.js +0 -246
  48. /package/{config.yaml → core/config.yaml} +0 -0
  49. /package/{logger.js → core/logger.js} +0 -0
package/core/index.js ADDED
@@ -0,0 +1,115 @@
1
+ const OpenAIProvider = require('./providers/openai');
2
+ const OllamaProvider = require('./providers/ollama');
3
+ const OpenRouterProvider = require('./providers/openrouter');
4
+ const { completion } = require('./completion');
5
+ const { router } = require('./router');
6
+ const { app } = require('./server');
7
+
8
+ class LLMJS2 {
9
+ constructor(config = {}) {
10
+ this.defaultProvider = config.defaultProvider;
11
+ this.providers = {
12
+ openai: new OpenAIProvider(config.openai || {}),
13
+ ollama: new OllamaProvider(config.ollama || {}),
14
+ openrouter: new OpenRouterProvider(config.openrouter || {})
15
+ };
16
+ }
17
+
18
+ parseModel(modelString) {
19
+ if (!modelString || typeof modelString !== 'string') {
20
+ return { provider: null, model: null };
21
+ }
22
+
23
+ const firstSlashIndex = modelString.indexOf('/');
24
+ if (firstSlashIndex !== -1) {
25
+ return {
26
+ provider: modelString.substring(0, firstSlashIndex),
27
+ model: modelString.substring(firstSlashIndex + 1)
28
+ };
29
+ }
30
+
31
+ return { provider: null, model: modelString };
32
+ }
33
+
34
+ getAvailableProviders() {
35
+ const available = [];
36
+
37
+ const openaiKey = process.env.OPENAI_API_KEY || this.providers.openai.apiKey;
38
+ const ollamaKey = process.env.OLLAMA_API_KEY || this.providers.ollama.apiKey;
39
+ const openrouterKey = process.env.OPEN_ROUTER_API_KEY || this.providers.openrouter.apiKey;
40
+
41
+ if (openaiKey && typeof openaiKey === 'string' && openaiKey.trim() && !openaiKey.startsWith(':')) {
42
+ available.push('openai');
43
+ }
44
+ if (ollamaKey && typeof ollamaKey === 'string' && ollamaKey.trim() && !ollamaKey.startsWith(':')) {
45
+ available.push('ollama');
46
+ }
47
+ if (openrouterKey && typeof openrouterKey === 'string' && openrouterKey.trim() && !openrouterKey.startsWith(':')) {
48
+ available.push('openrouter');
49
+ }
50
+
51
+ return available;
52
+ }
53
+
54
+ getAutoProvider() {
55
+ const availableProviders = this.getAvailableProviders();
56
+ if (availableProviders.length === 0) {
57
+ throw new Error('No API keys found. Set OLLAMA_API_KEY, OPEN_ROUTER_API_KEY, or OPENAI_API_KEY environment variables.');
58
+ }
59
+
60
+ const randomIndex = Math.floor(Math.random() * availableProviders.length);
61
+ const providerName = availableProviders[randomIndex];
62
+ const provider = this.providers[providerName];
63
+
64
+ return {
65
+ provider,
66
+ model: provider.defaultModel
67
+ };
68
+ }
69
+
70
+ getProvider(modelString) {
71
+ const { provider: specifiedProvider, model } = this.parseModel(modelString);
72
+
73
+ if (specifiedProvider) {
74
+ const provider = this.providers[specifiedProvider];
75
+ if (!provider) {
76
+ throw new Error(`Unknown provider: ${specifiedProvider}`);
77
+ }
78
+ return { provider, model };
79
+ }
80
+
81
+ const availableProviders = this.getAvailableProviders();
82
+ if (availableProviders.length === 0) {
83
+ throw new Error('No API keys configured. Set OPENAI_API_KEY, OLLAMA_API_KEY, or OPEN_ROUTER_API_KEY environment variables.');
84
+ }
85
+
86
+ const providerName = this.defaultProvider && availableProviders.includes(this.defaultProvider)
87
+ ? this.defaultProvider
88
+ : availableProviders[0];
89
+ const provider = this.providers[providerName];
90
+
91
+ return {
92
+ provider,
93
+ model: model || provider.defaultModel
94
+ };
95
+ }
96
+
97
+ completion(input) {
98
+ return completion(input, {
99
+ defaultProvider: this.defaultProvider,
100
+ openai: { apiKey: this.providers.openai.apiKey, baseURL: this.providers.openai.baseURL, timeout: this.providers.openai.timeout },
101
+ ollama: { apiKey: this.providers.ollama.apiKey, baseURL: this.providers.ollama.baseURL, timeout: this.providers.ollama.timeout },
102
+ openrouter: { apiKey: this.providers.openrouter.apiKey, baseURL: this.providers.openrouter.baseURL, timeout: this.providers.openrouter.timeout }
103
+ });
104
+ }
105
+ }
106
+
107
+ module.exports = {
108
+ completion,
109
+ LLMJS2,
110
+ router,
111
+ app,
112
+ OpenAIProvider,
113
+ OllamaProvider,
114
+ OpenRouterProvider
115
+ };
@@ -8,14 +8,18 @@ class OllamaProvider {
8
8
  this.timeout = config.timeout || 120000; // 2 minutes (LLMs can be slow)
9
9
  }
10
10
 
11
- async makeRequest(data) {
12
- if (!this.apiKey) {
11
+ async makeRequest(data, requestOptions = {}) {
12
+ const apiKey = requestOptions.apiKey || this.apiKey;
13
+ const baseURL = requestOptions.baseURL || this.baseURL;
14
+ const timeout = requestOptions.timeout || this.timeout;
15
+
16
+ if (!apiKey) {
13
17
  throw new Error('Ollama API key is required. Set OLLAMA_API_KEY environment variable or pass apiKey in config.');
14
18
  }
15
19
 
16
20
  const postData = JSON.stringify(data);
17
21
 
18
- const parsedUrl = new URL(this.baseURL);
22
+ const parsedUrl = new URL(baseURL);
19
23
 
20
24
  const options = {
21
25
  hostname: parsedUrl.hostname,
@@ -24,7 +28,7 @@ class OllamaProvider {
24
28
  method: 'POST',
25
29
  headers: {
26
30
  'Content-Type': 'application/json',
27
- 'Authorization': `Bearer ${this.apiKey}`,
31
+ 'Authorization': `Bearer ${apiKey}`,
28
32
  'Content-Length': Buffer.byteLength(postData)
29
33
  }
30
34
  };
@@ -56,7 +60,7 @@ class OllamaProvider {
56
60
  reject(new Error(`Ollama request failed: ${error.message}`));
57
61
  });
58
62
 
59
- req.setTimeout(this.timeout, () => {
63
+ req.setTimeout(timeout, () => {
60
64
  req.destroy();
61
65
  reject(new Error('Ollama request timed out'));
62
66
  });
@@ -100,7 +104,11 @@ class OllamaProvider {
100
104
  });
101
105
  }
102
106
 
103
- const response = await this.makeRequest(data);
107
+ const response = await this.makeRequest(data, {
108
+ apiKey: options.apiKey,
109
+ baseURL: options.baseURL,
110
+ timeout: options.timeout
111
+ });
104
112
 
105
113
  return {
106
114
  content: response.message?.content || '',
@@ -8,12 +8,16 @@ class OpenAIProvider {
8
8
  this.timeout = config.timeout || 60000; // 60 seconds
9
9
  }
10
10
 
11
- async makeRequest(endpoint, data) {
12
- if (!this.apiKey) {
11
+ async makeRequest(endpoint, data, requestOptions = {}) {
12
+ const apiKey = requestOptions.apiKey || this.apiKey;
13
+ const baseURL = requestOptions.baseURL || this.baseURL;
14
+ const timeout = requestOptions.timeout || this.timeout;
15
+
16
+ if (!apiKey) {
13
17
  throw new Error('OpenAI API key is required. Set OPENAI_API_KEY environment variable or pass apiKey in config.');
14
18
  }
15
19
 
16
- const requestUrl = `${this.baseURL}${endpoint}`;
20
+ const requestUrl = `${baseURL}${endpoint}`;
17
21
  const postData = JSON.stringify(data);
18
22
 
19
23
  const parsedUrl = new URL(requestUrl);
@@ -25,7 +29,7 @@ class OpenAIProvider {
25
29
  method: 'POST',
26
30
  headers: {
27
31
  'Content-Type': 'application/json',
28
- 'Authorization': `Bearer ${this.apiKey}`,
32
+ 'Authorization': `Bearer ${apiKey}`,
29
33
  'Content-Length': Buffer.byteLength(postData)
30
34
  }
31
35
  };
@@ -57,7 +61,7 @@ class OpenAIProvider {
57
61
  reject(new Error(`OpenAI request failed: ${error.message}`));
58
62
  });
59
63
 
60
- req.setTimeout(this.timeout, () => {
64
+ req.setTimeout(timeout, () => {
61
65
  req.destroy();
62
66
  reject(new Error('OpenAI request timed out'));
63
67
  });
@@ -88,7 +92,11 @@ class OpenAIProvider {
88
92
  }
89
93
  });
90
94
 
91
- const response = await this.makeRequest('/chat/completions', data);
95
+ const response = await this.makeRequest('/chat/completions', data, {
96
+ apiKey: options.apiKey,
97
+ baseURL: options.baseURL,
98
+ timeout: options.timeout
99
+ });
92
100
 
93
101
  return {
94
102
  content: response.choices[0]?.message?.content || '',
@@ -9,14 +9,18 @@ class OpenRouterProvider {
9
9
  this.config = config; // Store entire config for additional properties like referer, title
10
10
  }
11
11
 
12
- async makeRequest(data) {
13
- if (!this.apiKey) {
12
+ async makeRequest(data, requestOptions = {}) {
13
+ const apiKey = requestOptions.apiKey || this.apiKey;
14
+ const baseURL = requestOptions.baseURL || this.baseURL;
15
+ const timeout = requestOptions.timeout || this.timeout;
16
+
17
+ if (!apiKey) {
14
18
  throw new Error('OpenRouter API key is required. Set OPEN_ROUTER_API_KEY environment variable or pass apiKey in config.');
15
19
  }
16
20
 
17
21
  const postData = JSON.stringify(data);
18
22
 
19
- const parsedUrl = new URL(this.baseURL);
23
+ const parsedUrl = new URL(baseURL);
20
24
 
21
25
  const options = {
22
26
  hostname: parsedUrl.hostname,
@@ -25,7 +29,7 @@ class OpenRouterProvider {
25
29
  method: 'POST',
26
30
  headers: {
27
31
  'Content-Type': 'application/json',
28
- 'Authorization': `Bearer ${this.apiKey}`,
32
+ 'Authorization': `Bearer ${apiKey}`,
29
33
  'Content-Length': Buffer.byteLength(postData),
30
34
  'HTTP-Referer': this.config.referer || process.env.OPEN_ROUTER_REFERER || '',
31
35
  'X-Title': this.config.title || process.env.OPEN_ROUTER_TITLE || ''
@@ -63,7 +67,7 @@ class OpenRouterProvider {
63
67
  reject(new Error(`OpenRouter request failed: ${error.message}`));
64
68
  });
65
69
 
66
- req.setTimeout(this.timeout, () => {
70
+ req.setTimeout(timeout, () => {
67
71
  req.destroy();
68
72
  reject(new Error('OpenRouter request timed out'));
69
73
  });
@@ -97,7 +101,11 @@ class OpenRouterProvider {
97
101
  }
98
102
  });
99
103
 
100
- const response = await this.makeRequest(data);
104
+ const response = await this.makeRequest(data, {
105
+ apiKey: options.apiKey,
106
+ baseURL: options.baseURL,
107
+ timeout: options.timeout
108
+ });
101
109
 
102
110
  return {
103
111
  content: response.choices[0]?.message?.content || '',
package/core/router.js ADDED
@@ -0,0 +1,252 @@
1
+ const logger = require('./logger');
2
+ const { completion } = require('./completion');
3
+
4
+ function normalizeStrategy(strategy) {
5
+ const supportedStrategies = new Set(['default', 'random', 'sequential']);
6
+ return supportedStrategies.has(strategy) ? strategy : 'default';
7
+ }
8
+
9
+ function resolveApiKey(apiKey) {
10
+ if (typeof apiKey === 'string' && apiKey.startsWith('os.environ/')) {
11
+ const envVar = apiKey.replace('os.environ/', '');
12
+ return process.env[envVar] || apiKey;
13
+ }
14
+ return apiKey;
15
+ }
16
+
17
+ function normalizeModelList(modelList) {
18
+ if (!Array.isArray(modelList) || modelList.length === 0) {
19
+ throw new Error('Router requires a non-empty model list');
20
+ }
21
+
22
+ return modelList.map((model, index) => {
23
+ if (!model || typeof model !== 'object') {
24
+ throw new Error(`Model at index ${index} must be an object`);
25
+ }
26
+ if (!model.model_name) {
27
+ throw new Error(`Model at index ${index} is missing model_name`);
28
+ }
29
+ if (!model.llm_params || typeof model.llm_params !== 'object') {
30
+ throw new Error(`Model '${model.model_name}' is missing llm_params`);
31
+ }
32
+ if (!model.llm_params.model) {
33
+ throw new Error(`Model '${model.model_name}' is missing llm_params.model`);
34
+ }
35
+
36
+ return {
37
+ model_name: model.model_name,
38
+ llm_params: {
39
+ model: model.llm_params.model,
40
+ api_key: resolveApiKey(model.llm_params.api_key),
41
+ api_base: model.llm_params.api_base
42
+ }
43
+ };
44
+ });
45
+ }
46
+
47
+ function buildModelsByName(modelList) {
48
+ return modelList.reduce((accumulator, model) => {
49
+ if (!accumulator[model.model_name]) {
50
+ accumulator[model.model_name] = [];
51
+ }
52
+ accumulator[model.model_name].push(model);
53
+ return accumulator;
54
+ }, {});
55
+ }
56
+
57
+ function router(modelList, strategy = 'default', options = {}) {
58
+ const normalizedModelList = normalizeModelList(modelList);
59
+ const normalizedStrategy = normalizeStrategy(strategy);
60
+ const allowUnsafeGuardrails = Boolean(options.allowUnsafeGuardrails) || process.env.LLMJS2_ALLOW_UNSAFE_GUARDRAILS === 'true';
61
+ const guardrails = [];
62
+ const modelsByName = buildModelsByName(normalizedModelList);
63
+ let sequentialIndex = 0;
64
+
65
+ function getRandomItem(items) {
66
+ const randomIndex = Math.floor(Math.random() * items.length);
67
+ return items[randomIndex];
68
+ }
69
+
70
+ function getNextSequentialModel() {
71
+ const selectedModel = normalizedModelList[sequentialIndex];
72
+ sequentialIndex = (sequentialIndex + 1) % normalizedModelList.length;
73
+ return selectedModel;
74
+ }
75
+
76
+ function ensureModelsAvailable() {
77
+ if (normalizedModelList.length === 0) {
78
+ throw new Error('Router has no configured models');
79
+ }
80
+ }
81
+
82
+ function selectModel(modelName) {
83
+ ensureModelsAvailable();
84
+
85
+ if (!modelName) {
86
+ return autoSelectModel();
87
+ }
88
+
89
+ const availableModels = modelsByName[modelName];
90
+ if (!availableModels || availableModels.length === 0) {
91
+ throw new Error(`Model '${modelName}' not found in router configuration`);
92
+ }
93
+
94
+ if (availableModels.length === 1) {
95
+ return availableModels[0];
96
+ }
97
+
98
+ return getRandomItem(availableModels);
99
+ }
100
+
101
+ function autoSelectModel() {
102
+ ensureModelsAvailable();
103
+
104
+ switch (normalizedStrategy) {
105
+ case 'random':
106
+ return getRandomItem(normalizedModelList);
107
+ case 'sequential':
108
+ case 'default':
109
+ default:
110
+ return getNextSequentialModel();
111
+ }
112
+ }
113
+
114
+ function normalizeCompletionInput(input = {}) {
115
+ return {
116
+ modelName: input.model,
117
+ messages: input.messages || [],
118
+ stop: input.stop,
119
+ tools: input.tools,
120
+ toolChoice: input.tool_choice ?? input.toolChoice,
121
+ timeout: input.timeout,
122
+ apiKey: input.apiKey,
123
+ host: input.host || input.baseURL || input.baseUrl,
124
+ final: input.final ?? true
125
+ };
126
+ }
127
+
128
+ async function executeGuardrail(guardrail, processId, data) {
129
+ if (typeof guardrail.code === 'string') {
130
+ if (!allowUnsafeGuardrails) {
131
+ throw new Error(
132
+ `Guardrail '${guardrail.name}' uses string code, but unsafe execution is disabled. Set router option allowUnsafeGuardrails=true or LLMJS2_ALLOW_UNSAFE_GUARDRAILS=true to enable.`
133
+ );
134
+ }
135
+
136
+ const func = new Function('processId', 'data', `return (${guardrail.code})(processId, data)`);
137
+ return await func(processId, data);
138
+ }
139
+
140
+ if (typeof guardrail.code === 'function') {
141
+ return await guardrail.code(processId, data);
142
+ }
143
+
144
+ throw new Error(`Invalid guardrail code for '${guardrail.name}'`);
145
+ }
146
+
147
+ async function applyGuardrails(mode, processId, data) {
148
+ const modeLabel = mode === 'pre_call' ? 'Pre-call' : 'Post-call';
149
+ let currentValue = data;
150
+
151
+ for (const guardrail of guardrails) {
152
+ if (guardrail.mode !== mode) {
153
+ continue;
154
+ }
155
+
156
+ try {
157
+ const processed = await executeGuardrail(guardrail, processId, currentValue);
158
+ if (processed === null || processed === undefined) {
159
+ throw new Error(`Guardrail '${guardrail.name}' returned null/undefined`);
160
+ }
161
+ currentValue = processed;
162
+ } catch (error) {
163
+ throw new Error(`${modeLabel} guardrail '${guardrail.name}' failed: ${error.message}`);
164
+ }
165
+ }
166
+
167
+ return currentValue;
168
+ }
169
+
170
+ async function completionWithModel(modelConfig, input) {
171
+ return completion({
172
+ ...input,
173
+ model: modelConfig.llm_params.model,
174
+ apiKey: input.apiKey || modelConfig.llm_params.api_key,
175
+ host: input.host || modelConfig.llm_params.api_base,
176
+ final: false
177
+ });
178
+ }
179
+
180
+ function generateProcessId() {
181
+ return `req_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
182
+ }
183
+
184
+ return {
185
+ setGuardrails(nextGuardrails) {
186
+ guardrails.length = 0;
187
+ if (Array.isArray(nextGuardrails)) {
188
+ guardrails.push(...nextGuardrails);
189
+ }
190
+ },
191
+
192
+ addGuardrail(guardrail) {
193
+ guardrails.push(guardrail);
194
+ },
195
+
196
+ getAvailableModels() {
197
+ return Object.keys(modelsByName);
198
+ },
199
+
200
+ getModelStats() {
201
+ const stats = {};
202
+ Object.keys(modelsByName).forEach((modelName) => {
203
+ stats[modelName] = modelsByName[modelName].length;
204
+ });
205
+ return stats;
206
+ },
207
+
208
+ selectModel,
209
+
210
+ autoSelectModel,
211
+
212
+ executeGuardrail,
213
+
214
+ async completion(input = {}) {
215
+ const processId = generateProcessId();
216
+
217
+ try {
218
+ const normalizedInput = normalizeCompletionInput(input);
219
+ const selectedModel = selectModel(normalizedInput.modelName);
220
+
221
+ logger.debug('Selected model', {
222
+ processId,
223
+ model: selectedModel.llm_params.model,
224
+ modelName: selectedModel.model_name
225
+ });
226
+
227
+ const { modelName, ...completionInput } = normalizedInput;
228
+ const preparedInput = {
229
+ model: selectedModel.llm_params.model,
230
+ ...completionInput
231
+ };
232
+
233
+ const processedInput = await applyGuardrails('pre_call', processId, preparedInput);
234
+ const result = await completionWithModel(selectedModel, processedInput);
235
+ const finalResult = await applyGuardrails('post_call', processId, result);
236
+
237
+ return {
238
+ result: finalResult,
239
+ selectedModel: selectedModel.llm_params.model,
240
+ selectedModelName: selectedModel.model_name
241
+ };
242
+ } catch (error) {
243
+ logger.error('Router error', { processId, error: error.message });
244
+ throw error;
245
+ }
246
+ }
247
+ };
248
+ }
249
+
250
+ module.exports = {
251
+ router
252
+ };
@@ -1,5 +1,6 @@
1
1
  const http = require('http');
2
2
  const url = require('url');
3
+ const logger = require('./logger');
3
4
 
4
5
  class Server {
5
6
  constructor(options = {}) {
@@ -101,8 +102,8 @@ class Server {
101
102
  }
102
103
 
103
104
  for (const message of body.messages) {
104
- if (!message.role || !['system', 'user', 'assistant'].includes(message.role)) {
105
- return { valid: false, error: 'Each message must have a valid role (system, user, or assistant)' };
105
+ if (!message.role || !['system', 'user', 'assistant', 'tool'].includes(message.role)) {
106
+ return { valid: false, error: 'Each message must have a valid role (system, user, assistant, or tool)' };
106
107
  }
107
108
  if (typeof message.content !== 'string') {
108
109
  return { valid: false, error: 'Each message must have string content' };
@@ -123,12 +124,12 @@ class Server {
123
124
  });
124
125
 
125
126
  const routerResponse = await this.router.completion(body);
126
- const { result, selectedModel, selectedModelName } = routerResponse;
127
+ const { result, selectedModel } = routerResponse;
127
128
 
128
- // Return format with metadata plus message array
129
129
  const assistantMessage = {
130
130
  role: 'assistant',
131
- content: typeof result === 'string' ? result : result.content || ''
131
+ content: typeof result === 'string' ? result : result.content || '',
132
+ tool_calls: typeof result === 'object' ? result.tool_calls : undefined
132
133
  };
133
134
 
134
135
  // Include original messages plus assistant response
@@ -139,6 +140,15 @@ class Server {
139
140
  object: 'chat.completion',
140
141
  created: Math.floor(Date.now() / 1000),
141
142
  model: body.model || selectedModel, // Use selected model if auto-selected
143
+ choices: [
144
+ {
145
+ index: 0,
146
+ message: assistantMessage,
147
+ finish_reason: typeof result === 'object' ? (result.finishReason || 'stop') : 'stop'
148
+ }
149
+ ],
150
+ usage: typeof result === 'object' ? result.usage : undefined,
151
+ // Kept for backward compatibility with older llmjs2 clients
142
152
  messages: messages
143
153
  };
144
154
  }
package/package.json CHANGED
@@ -1,48 +1,64 @@
1
1
  {
2
2
  "name": "llmjs2",
3
- "version": "1.3.9",
4
- "description": "A unified Node.js library for connecting to multiple LLM providers: OpenAI, Ollama, and OpenRouter",
5
- "main": "index.js",
6
- "type": "commonjs",
3
+ "version": "1.6.1",
4
+ "description": "A unified Node.js library for connecting to multiple Large Language Model (LLM) providers: OpenAI, Ollama, and OpenRouter.",
5
+ "main": "core/index.js",
6
+ "bin": {
7
+ "llmjs2": "core/cli.js"
8
+ },
7
9
  "scripts": {
8
- "test": "node test.js",
9
- "start": "node cli.js",
10
- "server": "node cli.js",
11
- "router:example": "node server-config.js",
12
- "chat": "node chat-app.js",
13
- "test:completion": "node test-completion.js",
10
+ "test": "node core/test.js",
11
+ "test:completion": "node core/test-completion.js",
12
+ "start": "node core/cli.js",
14
13
  "lint": "echo 'No linting configured'",
15
- "typecheck": "echo 'No TypeScript configured'"
14
+ "typecheck": "echo 'No type checking configured'"
16
15
  },
17
16
  "keywords": [
18
17
  "llm",
19
- "llmjs",
20
- "llmjs2",
18
+ "ai",
21
19
  "openai",
22
20
  "ollama",
23
21
  "openrouter",
24
- "ai",
25
22
  "chatgpt",
26
- "completions",
23
+ "gpt",
24
+ "llmjs2",
25
+ "llmjs",
26
+ "language-model",
27
27
  "unified-api"
28
28
  ],
29
- "author": "Your Name",
29
+ "author": "",
30
30
  "license": "MIT",
31
31
  "repository": {
32
32
  "type": "git",
33
- "url": "https://github.com/littlellmjs/llmjs2.git"
33
+ "url": ""
34
34
  },
35
- "bugs": {
36
- "url": "https://github.com/littlellmjs/llmjs2/issues"
35
+ "dependencies": {
36
+ "yaml": "^2.0.0"
37
37
  },
38
- "homepage": "https://github.com/littlellmjs/llmjs2#readme",
39
38
  "engines": {
40
39
  "node": ">=14.0.0"
41
40
  },
42
- "dependencies": {
43
- "yaml": "^2.3.4"
44
- },
45
- "bin": {
46
- "llmjs2": "./cli.js"
47
- }
48
- }
41
+ "files": [
42
+ "chain/index.js",
43
+ "chain/lib/",
44
+ "chain/docs/",
45
+ "chain/examples.js",
46
+ "chain/agent-step-example.js",
47
+ "chain/simple-agent-step-example.js",
48
+ "chain/workflow-example-usage.js",
49
+ "chain/workflow-example.json",
50
+ "chain/AGENT_STEP_README.md",
51
+ "chain/WORKFLOW_README.md",
52
+ "chain/README.md",
53
+ "core/cli.js",
54
+ "core/config.yaml",
55
+ "core/index.js",
56
+ "core/logger.js",
57
+ "core/router.js",
58
+ "core/server.js",
59
+ "core/providers/",
60
+ "core/docs/",
61
+ "core/README.md",
62
+ "README.md"
63
+ ]
64
+ }