llmjs2 1.3.9 → 1.7.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +31 -476
- package/chain/AGENT_STEP_README.md +102 -0
- package/chain/README.md +257 -0
- package/chain/WORKFLOW_README.md +85 -0
- package/chain/agent-step-example.js +232 -0
- package/chain/docs/AGENT.md +126 -0
- package/chain/docs/GRAPH.md +490 -0
- package/chain/examples.js +314 -0
- package/chain/index.js +31 -0
- package/chain/lib/agent.js +338 -0
- package/chain/lib/flow/agent-step.js +119 -0
- package/chain/lib/flow/edge.js +24 -0
- package/chain/lib/flow/flow.js +76 -0
- package/chain/lib/flow/graph.js +331 -0
- package/chain/lib/flow/index.js +7 -0
- package/chain/lib/flow/step.js +63 -0
- package/chain/lib/memory/in-memory.js +117 -0
- package/chain/lib/memory/index.js +36 -0
- package/chain/lib/memory/lance-memory.js +225 -0
- package/chain/lib/memory/sqlite-memory.js +309 -0
- package/chain/simple-agent-step-example.js +168 -0
- package/chain/workflow-example-usage.js +70 -0
- package/chain/workflow-example.json +59 -0
- package/core/README.md +485 -0
- package/core/cli.js +275 -0
- package/core/docs/BASIC_USAGE.md +62 -0
- package/core/docs/CLI.md +104 -0
- package/{docs → core/docs}/GET_STARTED.md +129 -129
- package/{docs → core/docs}/GUARDRAILS_GUIDE.md +734 -734
- package/{docs → core/docs}/README.md +47 -47
- package/core/docs/ROUTER_GUIDE.md +199 -0
- package/{docs → core/docs}/SERVER_MODE.md +358 -350
- package/core/index.js +115 -0
- package/{providers → core/providers}/ollama.js +14 -6
- package/{providers → core/providers}/openai.js +14 -6
- package/core/providers/openrouter.js +206 -0
- package/core/router.js +252 -0
- package/{server.js → core/server.js} +15 -5
- package/package.json +46 -27
- package/cli.js +0 -195
- package/docs/BASIC_USAGE.md +0 -296
- package/docs/CLI.md +0 -455
- package/docs/ROUTER_GUIDE.md +0 -402
- package/index.js +0 -267
- package/providers/openrouter.js +0 -113
- package/router.js +0 -273
- package/test-completion.js +0 -99
- package/test.js +0 -246
- /package/{config.yaml → core/config.yaml} +0 -0
- /package/{logger.js → core/logger.js} +0 -0
package/providers/openrouter.js
DELETED
|
@@ -1,113 +0,0 @@
|
|
|
1
|
-
const https = require('https');
|
|
2
|
-
|
|
3
|
-
class OpenRouterProvider {
  /**
   * Minimal HTTPS client for the OpenRouter chat-completions API.
   *
   * @param {Object} [config={}] - Optional settings.
   * @param {string} [config.baseURL] - Endpoint URL; falls back to OPEN_ROUTER_BASE_URL env var.
   * @param {string} [config.apiKey] - API key; falls back to OPEN_ROUTER_API_KEY env var.
   * @param {string} [config.defaultModel] - Model used when a request names none.
   * @param {number} [config.timeout] - Socket timeout in milliseconds (default 60000).
   * @param {string} [config.referer] - Optional HTTP-Referer header value.
   * @param {string} [config.title] - Optional X-Title header value.
   */
  constructor(config = {}) {
    this.baseURL = config.baseURL || process.env.OPEN_ROUTER_BASE_URL || 'https://openrouter.ai/api/v1/chat/completions';
    this.apiKey = config.apiKey || process.env.OPEN_ROUTER_API_KEY;
    this.defaultModel = config.defaultModel || process.env.OPEN_ROUTER_DEFAULT_MODEL || 'openrouter/free';
    // ?? (not ||) so only a missing timeout falls back to the default.
    this.timeout = config.timeout ?? 60000; // 60 seconds
    this.config = config; // Store entire config for additional properties like referer, title
  }

  /**
   * POSTs a JSON payload to the configured OpenRouter endpoint.
   *
   * @param {Object} data - Request body; serialized as JSON.
   * @returns {Promise<Object>} Parsed JSON response on a 2xx status.
   * @throws {Error} When no API key is configured, on non-2xx responses,
   *   on unparseable success bodies, on network errors, or on timeout.
   */
  async makeRequest(data) {
    if (!this.apiKey) {
      throw new Error('OpenRouter API key is required. Set OPEN_ROUTER_API_KEY environment variable or pass apiKey in config.');
    }

    const postData = JSON.stringify(data);

    const parsedUrl = new URL(this.baseURL);

    const options = {
      hostname: parsedUrl.hostname,
      port: parsedUrl.port || 443,
      path: parsedUrl.pathname + parsedUrl.search,
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${this.apiKey}`,
        'Content-Length': Buffer.byteLength(postData),
        'HTTP-Referer': this.config.referer || process.env.OPEN_ROUTER_REFERER || '',
        'X-Title': this.config.title || process.env.OPEN_ROUTER_TITLE || ''
      }
    };

    // Remove empty headers
    if (!options.headers['HTTP-Referer']) delete options.headers['HTTP-Referer'];
    if (!options.headers['X-Title']) delete options.headers['X-Title'];

    return new Promise((resolve, reject) => {
      const req = https.request(options, (res) => {
        let body = '';

        res.on('data', (chunk) => {
          body += chunk;
        });

        res.on('end', () => {
          if (res.statusCode >= 200 && res.statusCode < 300) {
            try {
              resolve(JSON.parse(body));
            } catch (parseError) {
              reject(new Error(`Failed to parse OpenRouter response: ${parseError.message}`));
            }
            return;
          }

          // Error path: the body may not be JSON (e.g. an HTML gateway page).
          // Fall back to a raw-body snippet rather than masking the HTTP
          // status behind a generic "Failed to parse" error.
          let message = 'Unknown error';
          try {
            const parsed = JSON.parse(body);
            message = parsed.error?.message || message;
          } catch {
            if (body) message = body.slice(0, 200);
          }
          reject(new Error(`OpenRouter API error (${res.statusCode}): ${message}`));
        });
      });

      req.on('error', (error) => {
        reject(new Error(`OpenRouter request failed: ${error.message}`));
      });

      req.setTimeout(this.timeout, () => {
        req.destroy();
        reject(new Error('OpenRouter request timed out'));
      });

      req.write(postData);
      req.end();
    });
  }

  /**
   * Creates a chat completion and normalizes the first choice.
   *
   * @param {Array<Object>} messages - OpenAI-style chat messages.
   * @param {Object} [options={}] - Sampling/routing options (camelCase).
   * @returns {Promise<Object>} { content, role, tool_calls, usage, model, finishReason }.
   */
  async createCompletion(messages, options = {}) {
    const data = {
      model: options.model || this.defaultModel,
      messages: messages,
      // ?? (not ||) so an explicit temperature of 0 is sent instead of
      // being silently replaced by the 0.7 default.
      temperature: options.temperature ?? 0.7,
      max_tokens: options.maxTokens,
      top_p: options.topP,
      frequency_penalty: options.frequencyPenalty,
      presence_penalty: options.presencePenalty,
      stop: options.stop,
      tools: options.tools,
      tool_choice: options.toolChoice,
      transforms: options.transforms,
      models: options.models,
      route: options.route
    };

    // Remove undefined values so the wire payload stays minimal.
    Object.keys(data).forEach(key => {
      if (data[key] === undefined) {
        delete data[key];
      }
    });

    const response = await this.makeRequest(data);

    return {
      content: response.choices[0]?.message?.content || '',
      role: response.choices[0]?.message?.role || 'assistant',
      tool_calls: response.choices[0]?.message?.tool_calls,
      usage: response.usage,
      model: response.model,
      finishReason: response.choices[0]?.finish_reason
    };
  }
}
|
|
112
|
-
|
|
113
|
-
module.exports = OpenRouterProvider;
|
package/router.js
DELETED
|
@@ -1,273 +0,0 @@
|
|
|
1
|
-
const OpenAIProvider = require('./providers/openai');
|
|
2
|
-
const OllamaProvider = require('./providers/ollama');
|
|
3
|
-
const OpenRouterProvider = require('./providers/openrouter');
|
|
4
|
-
const logger = require('./logger');
|
|
5
|
-
|
|
6
|
-
class Router {
  /**
   * Routes chat-completion requests across a configured list of models,
   * applying optional pre/post-call guardrails and simple load balancing.
   *
   * @param {Array<Object>} modelList - Entries of shape
   *   { model_name, llm_params: { model, api_key, api_base } }.
   * @param {string} [strategy='default'] - Auto-routing strategy:
   *   'random', 'sequential', or 'default' (same round-robin as sequential).
   */
  constructor(modelList, strategy = 'default') {
    this.modelList = this.normalizeModelList(modelList);
    this.strategy = strategy;
    this.providers = {
      openai: new OpenAIProvider(),
      ollama: new OllamaProvider(),
      openrouter: new OpenRouterProvider()
    };
    this.guardrails = [];
    this.sequentialIndex = 0;

    // Group models by model_name so duplicate names can be load balanced.
    this.modelsByName = {};
    this.modelList.forEach(model => {
      if (!this.modelsByName[model.model_name]) {
        this.modelsByName[model.model_name] = [];
      }
      this.modelsByName[model.model_name].push(model);
    });
  }

  /**
   * Normalizes raw model entries and resolves env-var-backed API keys.
   * @param {Array<Object>} modelList - Raw configuration entries.
   * @returns {Array<Object>} Normalized entries.
   */
  normalizeModelList(modelList) {
    return modelList.map(model => ({
      model_name: model.model_name,
      llm_params: {
        model: model.llm_params.model,
        api_key: this.resolveApiKey(model.llm_params.api_key),
        api_base: model.llm_params.api_base
      }
    }));
  }

  /**
   * Resolves keys written as 'os.environ/VAR' against process.env.
   * Falls back to the literal value when the env var is unset.
   * @param {string|undefined} apiKey
   * @returns {string|undefined}
   */
  resolveApiKey(apiKey) {
    if (typeof apiKey === 'string' && apiKey.startsWith('os.environ/')) {
      const envVar = apiKey.replace('os.environ/', '');
      return process.env[envVar] || apiKey;
    }
    return apiKey;
  }

  /** Replaces the full guardrail list; a falsy argument clears it. */
  setGuardrails(guardrails) {
    this.guardrails = guardrails || [];
  }

  /**
   * Picks a model entry by name (random pick across duplicates), or
   * auto-selects by strategy when no name is given.
   * @param {string} [modelName]
   * @returns {Object} The selected model entry.
   * @throws {Error} When the requested model name is not configured.
   */
  selectModel(modelName) {
    if (!modelName) {
      // Auto-routing based on strategy
      return this.autoSelectModel();
    }

    // Direct model selection with load balancing
    const availableModels = this.modelsByName[modelName];
    if (!availableModels || availableModels.length === 0) {
      throw new Error(`Model '${modelName}' not found in router configuration`);
    }

    if (availableModels.length === 1) {
      return availableModels[0];
    }

    // Load balancing for multiple models with same name
    const randomIndex = Math.floor(Math.random() * availableModels.length);
    return availableModels[randomIndex];
  }

  /**
   * Auto-selects a model according to the configured strategy.
   * 'default' and 'sequential' share the same round-robin behavior
   * (the original duplicated the code; merged here via fallthrough).
   * @returns {Object} The selected model entry.
   */
  autoSelectModel() {
    const allModels = this.modelList;

    switch (this.strategy) {
      case 'random': {
        const randomIndex = Math.floor(Math.random() * allModels.length);
        return allModels[randomIndex];
      }

      case 'sequential':
      case 'default':
      default: {
        // Round-robin over the configured model list.
        const selectedModel = allModels[this.sequentialIndex];
        this.sequentialIndex = (this.sequentialIndex + 1) % allModels.length;
        return selectedModel;
      }
    }
  }

  /**
   * Runs every pre_call guardrail in order, threading the (possibly
   * transformed) input through each one.
   * @param {string} processId - Correlation id for this request.
   * @param {Object} input - Normalized completion input.
   * @returns {Promise<Object>} The input after all pre-call guardrails.
   * @throws {Error} When a guardrail fails or returns null/undefined.
   */
  async applyPreGuardrails(processId, input) {
    let currentInput = input;

    for (const guardrail of this.guardrails) {
      if (guardrail.mode === 'pre_call') {
        try {
          const result = await this.executeGuardrail(guardrail, processId, currentInput);
          if (result === null || result === undefined) {
            throw new Error(`Guardrail '${guardrail.name}' returned null/undefined`);
          }
          currentInput = result;
        } catch (error) {
          throw new Error(`Pre-call guardrail '${guardrail.name}' failed: ${error.message}`);
        }
      }
    }

    return currentInput;
  }

  /**
   * Runs every post_call guardrail in order, threading the (possibly
   * transformed) provider result through each one.
   * @param {string} processId - Correlation id for this request.
   * @param {Object} result - Provider completion result.
   * @returns {Promise<Object>} The result after all post-call guardrails.
   * @throws {Error} When a guardrail fails or returns null/undefined.
   */
  async applyPostGuardrails(processId, result) {
    let currentResult = result;

    for (const guardrail of this.guardrails) {
      if (guardrail.mode === 'post_call') {
        try {
          const processed = await this.executeGuardrail(guardrail, processId, currentResult);
          if (processed === null || processed === undefined) {
            throw new Error(`Guardrail '${guardrail.name}' returned null/undefined`);
          }
          currentResult = processed;
        } catch (error) {
          throw new Error(`Post-call guardrail '${guardrail.name}' failed: ${error.message}`);
        }
      }
    }

    return currentResult;
  }

  /**
   * Executes a single guardrail, whose code may be a function or a
   * stringified function expression.
   *
   * SECURITY NOTE: string guardrails are evaluated via `new Function`,
   * which executes arbitrary code. Only load guardrail definitions from
   * trusted configuration — never from untrusted/external input.
   *
   * @param {Object} guardrail - { name, mode, code }.
   * @param {string} processId - Correlation id.
   * @param {*} data - Input or result to pass through.
   * @returns {Promise<*>} The guardrail's return value.
   * @throws {Error} When guardrail.code is neither a string nor a function.
   */
  async executeGuardrail(guardrail, processId, data) {
    if (typeof guardrail.code === 'string') {
      // Execute string code as function
      const func = new Function('processId', 'data', `return (${guardrail.code})(processId, data)`);
      return await func(processId, data);
    } else if (typeof guardrail.code === 'function') {
      return await guardrail.code(processId, data);
    } else {
      throw new Error(`Invalid guardrail code for '${guardrail.name}'`);
    }
  }

  /**
   * Full completion pipeline: select model → pre-call guardrails →
   * provider call → post-call guardrails.
   *
   * @param {Object} options - OpenAI-style options; both snake_case and
   *   camelCase spellings are accepted.
   * @returns {Promise<Object>} { result, selectedModel, selectedModelName }.
   */
  async completion(options) {
    const processId = this.generateProcessId();
    let selectedModel;

    try {
      // Select model
      selectedModel = this.selectModel(options.model);
      console.log(`[${processId}] Selected model: ${selectedModel.llm_params.model}`);

      // Prepare input for guardrails. `??` (not `||`) so explicit falsy
      // values such as frequency_penalty: 0 are not silently dropped in
      // favor of the other spelling.
      const input = {
        model: selectedModel.llm_params.model,
        messages: options.messages || [],
        temperature: options.temperature,
        maxTokens: options.max_tokens ?? options.maxTokens,
        topP: options.top_p ?? options.topP,
        frequencyPenalty: options.frequency_penalty ?? options.frequencyPenalty,
        presencePenalty: options.presence_penalty ?? options.presencePenalty,
        stop: options.stop,
        tools: options.tools,
        toolChoice: options.tool_choice ?? options.toolChoice
      };

      // Apply pre-call guardrails
      const processedInput = await this.applyPreGuardrails(processId, input);

      // Create completion using selected model
      const result = await this.callProvider(selectedModel, processedInput);

      // Apply post-call guardrails
      const finalResult = await this.applyPostGuardrails(processId, result);

      // Return result with selected model information
      return {
        result: finalResult,
        selectedModel: selectedModel.llm_params.model, // Full model name with provider prefix
        selectedModelName: selectedModel.model_name // User-friendly model name
      };

    } catch (error) {
      logger.error(`Router error`, { processId, error: error.message });
      throw error;
    }
  }

  /**
   * Dispatches a prepared request to the provider named by the
   * 'provider/model' prefix of the configured model string.
   *
   * @param {Object} modelConfig - Normalized model entry.
   * @param {Object} input - Guardrail-processed completion input.
   * @returns {Promise<Object>} The provider's normalized completion result.
   * @throws {Error} When the provider prefix is unknown.
   */
  async callProvider(modelConfig, input) {
    const firstSlashIndex = modelConfig.llm_params.model.indexOf('/');
    const providerName = firstSlashIndex !== -1 ? modelConfig.llm_params.model.substring(0, firstSlashIndex) : '';
    const actualModel = firstSlashIndex !== -1 ? modelConfig.llm_params.model.substring(firstSlashIndex + 1) : modelConfig.llm_params.model;
    const provider = this.providers[providerName];

    if (!provider) {
      throw new Error(`Unknown provider: ${providerName}`);
    }

    // Set the API key for this request (only if provided, otherwise use provider's default).
    // NOTE(review): this mutates the shared provider instance, so concurrent
    // completions with different keys could race — confirm single-flight use.
    if (modelConfig.llm_params.api_key !== undefined) {
      provider.apiKey = modelConfig.llm_params.api_key;
    }

    // Prepare the completion options
    const completionOptions = {
      model: actualModel,
      temperature: input.temperature,
      maxTokens: input.maxTokens,
      topP: input.topP,
      frequencyPenalty: input.frequencyPenalty,
      presencePenalty: input.presencePenalty,
      stop: input.stop,
      tools: input.tools,
      toolChoice: input.toolChoice
    };

    // Log request information (API key truncated to avoid leaking secrets)
    const apiKeyPreview = provider.apiKey ? provider.apiKey.substring(0, 10) + '...' : 'none';
    logger.info('LLMJS2 📤 Sending to LLM provider', {
      source: 'router',
      provider: providerName,
      model: actualModel,
      apiKey: apiKeyPreview,
      messages: input.messages,
      options: completionOptions
    });

    // Call the provider directly with just the actual model name (without provider prefix)
    const result = await provider.createCompletion(input.messages, completionOptions);

    // Log response information
    logger.info('LLMJS2 📥 Received from LLM provider', {
      source: 'router',
      ...result
    });

    return result;
  }

  /** Generates a unique-ish request id for log correlation. */
  generateProcessId() {
    // `slice(2, 11)` replaces the deprecated substr(2, 9).
    return `req_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
  }

  // Method to add guardrails dynamically
  addGuardrail(guardrail) {
    this.guardrails.push(guardrail);
  }

  // Method to get available models
  getAvailableModels() {
    return Object.keys(this.modelsByName);
  }

  // Method to get model count for load balancing info
  getModelStats() {
    const stats = {};
    Object.keys(this.modelsByName).forEach(modelName => {
      stats[modelName] = this.modelsByName[modelName].length;
    });
    return stats;
  }
}
|
|
265
|
-
|
|
266
|
-
/**
 * Convenience factory mirroring `new Router(modelList, strategy)`.
 *
 * @param {Array<Object>} modelList - Model configuration entries.
 * @param {string} [strategy='default'] - Auto-routing strategy.
 * @returns {Router} A freshly constructed Router instance.
 */
const router = (modelList, strategy = 'default') => new Router(modelList, strategy);
|
|
269
|
-
|
|
270
|
-
module.exports = {
|
|
271
|
-
Router,
|
|
272
|
-
router
|
|
273
|
-
};
|
package/test-completion.js
DELETED
|
@@ -1,99 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env node
|
|
2
|
-
|
|
3
|
-
/**
|
|
4
|
-
* Simple Completion Test
|
|
5
|
-
* Tests basic llmjs2 completion functionality using defaults
|
|
6
|
-
*/
|
|
7
|
-
|
|
8
|
-
const { completion } = require('./index');
|
|
9
|
-
|
|
10
|
-
/**
 * Exercises the default completion path end-to-end: sends a fixed prompt
 * through `completion()` with no model or API key specified and
 * sanity-checks the response on stdout.
 *
 * @returns {Promise<boolean>} true when a non-empty string came back.
 */
async function testSimpleCompletion() {
  console.log('🧪 Testing simple completion with defaults...\n');

  try {
    console.log('📤 Sending completion request...');
    console.log('Prompt: "Hello! Can you tell me a short joke?"');
    console.log('Note: No model specified - will auto-select provider based on available API keys\n');

    // No model or API key supplied; the library picks a provider itself.
    const response = await completion('Hello! Can you tell me a short joke?');

    console.log('✅ Completion successful!');
    console.log('📥 Response received:');
    console.log('---');
    console.log(response);
    console.log('---\n');

    // Guard clause: bail out unless we got back a non-empty string.
    const isNonEmptyString = typeof response === 'string' && response.length > 0;
    if (!isNonEmptyString) {
      console.log('❌ Response is not a valid string');
      return false;
    }
    console.log('✅ Response is valid string');
    console.log(`📏 Response length: ${response.length} characters`);

    // Loose heuristic: joke keywords or punctuation suggest a real answer.
    const lowered = response.toLowerCase();
    const looksLikeJoke =
      lowered.includes('joke') ||
      lowered.includes('knock knock') ||
      response.includes('?') ||
      response.includes('!');

    if (looksLikeJoke) {
      console.log('✅ Response appears to be joke-related');
    } else {
      console.log('ℹ️ Response received (may not be joke-related)');
    }

    console.log('\n🎉 Simple completion test passed!');
    return true;

  } catch (error) {
    console.error('❌ Completion test failed:');
    console.error('Error:', error.message);

    if (error.message.includes('API key')) {
      console.log('\n💡 Tip: Make sure you have API keys set in environment variables:');
      console.log('   export OLLAMA_API_KEY=your_key');
      console.log('   export OPEN_ROUTER_API_KEY=your_key');
    }

    return false;
  }
}
|
|
64
|
-
|
|
65
|
-
// Check environment before running
|
|
66
|
-
/**
 * Reports on stdout whether either supported API key is present in the
 * environment; warns (but does not abort) when neither is set.
 */
function checkEnvironment() {
  const anyKeyPresent = process.env.OLLAMA_API_KEY || process.env.OPEN_ROUTER_API_KEY;

  if (anyKeyPresent) {
    console.log('✅ API keys found in environment\n');
    return;
  }

  console.log('⚠️ No API keys found in environment variables.');
  console.log('   Set at least one: OLLAMA_API_KEY or OPEN_ROUTER_API_KEY');
  console.log('   The test will still run but may fail.\n');
}
|
|
77
|
-
|
|
78
|
-
// Run the test
|
|
79
|
-
/**
 * Script entry point: prints a banner, reports on available API keys,
 * runs the completion smoke test, then exits 0 on success or 1 on failure.
 */
async function main() {
  console.log('🚀 Running Simple Completion Test for llmjs2\n');

  checkEnvironment();

  const passed = await testSimpleCompletion();

  // Failure path first, keeping the happy path unindented below.
  if (!passed) {
    console.log('\n💥 Test failed!');
    process.exit(1);
  }

  console.log('\n✨ Test completed successfully!');
  process.exit(0);
}
|
|
94
|
-
|
|
95
|
-
if (require.main === module) {
|
|
96
|
-
main();
|
|
97
|
-
}
|
|
98
|
-
|
|
99
|
-
module.exports = { testSimpleCompletion };
|