llmjs2 1.3.8 → 1.6.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. package/README.md +31 -476
  2. package/chain/AGENT_STEP_README.md +102 -0
  3. package/chain/README.md +257 -0
  4. package/chain/WORKFLOW_README.md +85 -0
  5. package/chain/agent-step-example.js +232 -0
  6. package/chain/docs/AGENT.md +126 -0
  7. package/chain/docs/GRAPH.md +490 -0
  8. package/chain/examples.js +314 -0
  9. package/chain/index.js +31 -0
  10. package/chain/lib/agent.js +338 -0
  11. package/chain/lib/flow/agent-step.js +119 -0
  12. package/chain/lib/flow/edge.js +24 -0
  13. package/chain/lib/flow/flow.js +76 -0
  14. package/chain/lib/flow/graph.js +331 -0
  15. package/chain/lib/flow/index.js +7 -0
  16. package/chain/lib/flow/step.js +63 -0
  17. package/chain/lib/memory/in-memory.js +117 -0
  18. package/chain/lib/memory/index.js +36 -0
  19. package/chain/lib/memory/lance-memory.js +225 -0
  20. package/chain/lib/memory/sqlite-memory.js +309 -0
  21. package/chain/simple-agent-step-example.js +168 -0
  22. package/chain/workflow-example-usage.js +70 -0
  23. package/chain/workflow-example.json +59 -0
  24. package/core/README.md +485 -0
  25. package/core/cli.js +275 -0
  26. package/core/docs/BASIC_USAGE.md +62 -0
  27. package/core/docs/CLI.md +104 -0
  28. package/{docs → core/docs}/GET_STARTED.md +129 -129
  29. package/{docs → core/docs}/GUARDRAILS_GUIDE.md +734 -734
  30. package/{docs → core/docs}/README.md +47 -47
  31. package/core/docs/ROUTER_GUIDE.md +199 -0
  32. package/{docs → core/docs}/SERVER_MODE.md +358 -350
  33. package/core/index.js +115 -0
  34. package/{providers → core/providers}/ollama.js +14 -6
  35. package/{providers → core/providers}/openai.js +14 -6
  36. package/{providers → core/providers}/openrouter.js +14 -6
  37. package/core/router.js +252 -0
  38. package/{server.js → core/server.js} +15 -5
  39. package/package.json +43 -27
  40. package/cli.js +0 -195
  41. package/docs/BASIC_USAGE.md +0 -296
  42. package/docs/CLI.md +0 -455
  43. package/docs/ROUTER_GUIDE.md +0 -402
  44. package/index.js +0 -265
  45. package/router.js +0 -273
  46. package/test-completion.js +0 -99
  47. package/test.js +0 -246
  48. package/{config.yaml → core/config.yaml} +0 -0
  49. package/{logger.js → core/logger.js} +0 -0
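Most of these changes are a restructure: the runtime (providers, router, server, config, logger) moves under `core/`, a new `chain/` tree (agents, flow graph, memory backends) is added, and the old root-level modules, docs, and tests are deleted. Consumers that imported the package by deep path will need to adjust. A minimal before/after sketch, assuming deep subpath imports were (and remain) reachable through the package's exports, which this diff does not show:

```javascript
// Hypothetical consumer-side migration for the core/ restructure shown above.
// The deep paths mirror the file moves in the list; whether package.json still
// exposes them as subpath exports is an assumption, not confirmed by this diff.

// 1.3.8 layout: providers and router at the package root.
// const OpenAIProvider = require('llmjs2/providers/openai');
// const { router } = require('llmjs2/router');

// 1.6.1 layout: the same modules now live under core/.
const OpenAIProvider = require('llmjs2/core/providers/openai');
const { router } = require('llmjs2/core/router');
```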
package/docs/ROUTER_GUIDE.md DELETED
@@ -1,402 +0,0 @@
- # llmjs2 Router Usage Guide
-
- The llmjs2 router provides intelligent model routing and load balancing capabilities, allowing you to distribute requests across multiple model deployments with different strategies.
-
- ## Overview
-
- The router system enables:
-
- - **Load balancing** across models with the same name
- - **Multiple routing strategies** (default, random, sequential)
- - **Provider-agnostic routing** with unified API
- - **Flexible model configuration** for different providers
- -
-
- ## Quick Start
-
- ### Basic Setup
-
- ```javascript
- import { router } from 'llmjs2';
-
- // Define your model deployments
- const modelList = [
-   {
-     "model_name": "gpt-3.5-turbo",
-     "llm_params": {
-       "model": "ollama/chatgpt-v-2",
-       "api_key": process.env.OLLAMA_API_KEY,
-       "api_base": process.env.OLLAMA_API_BASE
-     }
-   },
-   {
-     "model_name": "openai-turbo",
-     "llm_params": {
-       "model": "gpt-3.5-turbo",
-       "api_key": process.env.OPENAI_API_KEY
-     }
-   },
-   {
-     "model_name": "gpt-4",
-     "llm_params": {
-       "model": "ollama/gpt-4",
-       "api_key": process.env.OLLAMA_API_KEY,
-       "api_base": process.env.OLLAMA_API_BASE
-     }
-   }
- ];
-
- // Create routers with different strategies
- const defaultRouter = router(modelList);
- const randomRouter = router(modelList, 'random');
- const sequentialRouter = router(modelList, 'sequential');
- ```
-
- ### Basic Usage
-
- ```javascript
- // Route to specific model
- const response = await defaultRouter.completion({
-   model: "gpt-3.5-turbo",
-   messages: [{"role": "user", "content": "Hey, how's it going?"}]
- });
-
- // Auto-route with random strategy
- const randomResponse = await randomRouter.completion({
-   messages: [{"role": "user", "content": "Hey, how's it going?"}]
- });
-
- // Auto-route with sequential strategy
- const seqResponse = await sequentialRouter.completion({
-   messages: [{"role": "user", "content": "Hey, how's it going?"}]
- });
- ```
-
- ## Model Configuration
-
- ### Model List Format
-
- Each model in the list is defined with:
-
- ```javascript
- {
-   "model_name": "string",    // Alias for routing (can have multiple providers)
-   "llm_params": {            // Provider-specific parameters
-     "model": "string",       // Actual model identifier for the provider
-     "api_key": "string",     // API key (can use environment variables)
-     "api_base": "string?",   // Custom API base URL (optional)
-     // ... other provider-specific params
-   }
- }
- ```
-
- ### Supported Providers
-
- #### Ollama
-
- ```javascript
- {
-   "model_name": "my-ollama-model",
-   "llm_params": {
-     "model": "ollama/llama2",
-     "api_key": process.env.OLLAMA_API_KEY,
-     "api_base": process.env.OLLAMA_API_BASE
-   }
- }
- ```
-
- #### OpenRouter
-
- ```javascript
- {
-   "model_name": "my-openrouter-model",
-   "llm_params": {
-     "model": "openrouter/free-model",
-     "api_key": process.env.OPEN_ROUTER_API_KEY
-   }
- }
- ```
-
- #### OpenAI
-
- ```javascript
- {
-   "model_name": "my-openai-model",
-   "llm_params": {
-     "model": "openai/gpt-4",
-     "api_key": process.env.OPENAI_API_KEY
-   }
- }
- ```
-
- ## Routing Strategies
-
- ### Default Strategy
-
- When no strategy is specified, uses sequential selection across all available models for auto-routing (when no specific model is requested).
-
- ```javascript
- const route = router(modelList); // or router(modelList, 'default')
-
- // Auto-route with sequential selection (cycles through all models)
- const response = await route.completion({
-   messages: [...]
- });
-
- // Routes to one of the models with model_name="gpt-3.5-turbo" (load balancing)
- const response = await route.completion({
-   model: "gpt-3.5-turbo",
-   messages: [...]
- });
- ```
-
- ### Random Strategy
-
- Randomly selects from available models when no specific model is requested.
-
- ```javascript
- const route = router(modelList, 'random');
-
- // Randomly selects from ALL models in the list
- const response = await route.completion({
-   messages: [...]
- });
- ```
-
- ### Sequential Strategy
-
- Cycles through models in order for each request.
-
- ```javascript
- const route = router(modelList, 'sequential');
-
- // Uses first model, then second, then third, etc.
- const response1 = await route.completion({ messages: [...] }); // model 1
- const response2 = await route.completion({ messages: [...] }); // model 2
- const response3 = await route.completion({ messages: [...] }); // model 3
- // ... cycles back to model 1
- ```
-
- ## Advanced Usage
-
- ### Load Balancing
-
- Multiple models with the same `model_name` enable load balancing:
-
- ```javascript
- const modelList = [
-   {
-     "model_name": "gpt-3.5-turbo",
-     "llm_params": {
-       "model": "ollama/chatgpt-v-2",
-       "api_key": process.env.OLLAMA_API_KEY
-     }
-   },
-   {
-     "model_name": "gpt-3.5-turbo", // Same name - load balancing
-     "llm_params": {
-       "model": "openai/gpt-3.5-turbo",
-       "api_key": process.env.OPENAI_API_KEY
-     }
-   },
-   {
-     "model_name": "gpt-3.5-turbo", // Same name - load balancing
-     "llm_params": {
-       "model": "openrouter/minimax2.5",
-       "api_key": process.env.OPEN_ROUTER_API_KEY,
-       "api_base": process.env.OPEN_ROUTER_BASE
-     }
-   }
- ];
-
- const route = router(modelList);
-
- // This will load balance across all 3 "gpt-3.5-turbo" models
- const response = await route.completion({
-   model: "gpt-3.5-turbo",
-   messages: [...]
- });
- ```
-
- ### Environment Variables
-
- Use environment variables for configuration:
-
- ```javascript
- const modelList = [
-   {
-     "model_name": "production-gpt4",
-     "llm_params": {
-       "model": "openai/gpt-4",
-       "api_key": process.env.OPENAI_API_KEY,
-       "api_base": process.env.OPENAI_API_BASE || "https://api.openai.com/v1"
-     }
-   },
-   {
-     "model_name": "staging-gpt4",
-     "llm_params": {
-       "model": "ollama/gpt-4",
-       "api_key": process.env.OLLAMA_API_KEY,
-       "api_base": process.env.OLLAMA_API_BASE
-     }
-   }
- ];
- ```
-
- ### Complete API Reference
-
- ```javascript
- import { router } from 'llmjs2';
-
- // Create router
- const myRouter = router(modelList, strategy);
-
- // Completion with specific model
- const response1 = await myRouter.completion({
-   model: "model_name",  // Optional: specific model to route to
-   messages: [...],      // Required: chat messages
-   tools: [...],         // Optional: function calling tools
-   // ... other completion params
- });
-
- // Auto-routing completion
- const response2 = await myRouter.completion({
-   messages: [...],  // Required: chat messages
-   // Uses routing strategy when no model specified
- });
- ```
-
- ## Error Handling
-
- ```javascript
- try {
-   const response = await route.completion({
-     model: "non-existent-model",
-     messages: [{"role": "user", "content": "Hello"}]
-   });
- } catch (error) {
-   if (error.message.includes('Model not found')) {
-     console.log('Model not configured in router');
-   } else if (error.message.includes('API key')) {
-     console.log('Provider API key missing');
-   } else {
-     console.log('Routing error:', error.message);
-   }
- }
- ```
-
- ## Use Cases
-
- ### Multi-Provider Fallback
-
- ```javascript
- const fallbackModels = [
-   {
-     "model_name": "gpt-4_1",
-     "llm_params": { "model": "openai/gpt-4", "api_key": process.env.OPENAI_API_KEY }
-   },
-   {
-     "model_name": "gpt-4_2",
-     "llm_params": { "model": "ollama/gpt-4", "api_key": process.env.OLLAMA_API_KEY }
-   },
-   {
-     "model_name": "gpt-4_3",
-     "llm_params": { "model": "openrouter/gpt-4", "api_key": process.env.OPEN_ROUTER_API_KEY }
-   }
- ];
-
- const route = router(fallbackModels);
-
- // Automatically tries different providers if one fails
- const response = await route.completion({
-   messages: [...]
- });
- ```
-
- ### Cost Optimization
-
- ```javascript
- const costOptimizedModels = [
-   {
-     "model_name": "text-davinci-001",
-     "llm_params": { "model": "ollama/text-davinci-003", "api_key": process.env.OLLAMA_API_KEY }
-   },
-   {
-     "model_name": "text-davinci-002",
-     "llm_params": { "model": "openrouter/text-davinci-003", "api_key": process.env.OPENROUTER_API_KEY }
-   },
-   {
-     "model_name": "text-davinci-003",
-     "llm_params": { "model": "openai/gpt-3.5-turbo", "api_key": process.env.OPENAI_API_KEY }
-   }
- ];
-
- const route = router(costOptimizedModels, 'random');
-
- // Load balances across cheaper providers
- const response = await route.completion({
-   model: "text-davinci-003",
-   messages: [...]
- });
- ```
-
- ### A/B Testing
-
- ```javascript
- const abTestModels = [
-   {
-     "model_name": "experiment-a",
-     "llm_params": { "model": "gpt-4", "api_key": process.env.OPENAI_API_KEY }
-   },
-   {
-     "model_name": "experiment-b",
-     "llm_params": { "model": "ollama/gpt-4", "api_key": process.env.OLLAMA_API_KEY }
-   }
- ];
-
- const route = router(abTestModels, 'random');
-
- // Randomly routes between experiment variants
- const response = await route.completion({
-   model: "experiment-a", // or "experiment-b"
-   messages: [...]
- });
- ```
-
- ## Configuration Examples
-
- ### Production Setup
-
- ```javascript
- const productionModels = [
-   // Primary OpenAI models
-   { "model_name": "gpt-4", "llm_params": { "model": "gpt-4", "api_key": process.env.OPENAI_API_KEY } },
-   { "model_name": "gpt-3.5-turbo", "llm_params": { "model": "openai/gpt-3.5-turbo", "api_key": process.env.OPENAI_API_KEY } },
-
-   // Fallback Ollama models
-   { "model_name": "gpt-4", "llm_params": { "model": "ollama/gpt-4", "api_key": process.env.OLLAMA_API_KEY } },
-   { "model_name": "gpt-3.5-turbo", "llm_params": { "model": "ollama/gpt-3.5-turbo", "api_key": process.env.OLLAMA_API_KEY } },
-
-   // Cost-effective alternatives
-   { "model_name": "gpt-3.5-turbo", "llm_params": { "model": "openrouter/openrouter/free", "api_key": process.env.OPENROUTER_API_KEY } }
- ];
-
- export const productionRouter = router(productionModels);
- export const stagingRouter = router(productionModels, 'sequential');
- ```
-
- ### Development Setup
-
- ```javascript
- const devModels = [
-   // Mock/echo models for testing
-   { "model_name": "echo", "llm_params": { "model": "echo", "api_key": "dev" } },
-
-   // Single provider for consistency
-   { "model_name": "gpt-3.5-turbo", "llm_params": { "model": "ollama/gpt-3.5-turbo", "api_key": process.env.OLLAMA_API_KEY } }
- ];
-
- export const devRouter = router(devModels, 'sequential');
- ```
-
- This router system provides powerful routing capabilities while maintaining a simple, unified API for LLM completion across multiple providers and deployment strategies.
package/index.js DELETED
@@ -1,265 +0,0 @@
- const OpenAIProvider = require('./providers/openai');
- const OllamaProvider = require('./providers/ollama');
- const OpenRouterProvider = require('./providers/openrouter');
- const { router } = require('./router');
- const { app } = require('./server');
- const logger = require('./logger');
-
- // Module-level variable to track sequential provider selection across completion calls
- let autoProviderIndex = 0;
-
- class LLMJS2 {
-   constructor(config = {}) {
-     this.providers = {
-       openai: new OpenAIProvider(config.openai || {}),
-       ollama: new OllamaProvider(config.ollama || {}),
-       openrouter: new OpenRouterProvider(config.openrouter || {})
-     };
-     this.defaultProvider = config.defaultProvider;
-     this.timeout = config.timeout || 60000;
-   }
-
-   /**
-    * Get available providers based on API keys
-    */
-   getAvailableProviders() {
-     const available = [];
-
-     const openaiKey = process.env.OPENAI_API_KEY || this.providers.openai.apiKey;
-     const ollamaKey = process.env.OLLAMA_API_KEY || this.providers.ollama.apiKey;
-     const openrouterKey = process.env.OPEN_ROUTER_API_KEY || this.providers.openrouter.apiKey;
-
-     // Check if keys are non-empty and not placeholder values
-     if (openaiKey && typeof openaiKey === 'string' && openaiKey.trim() && !openaiKey.startsWith(':')) {
-       available.push('openai');
-     }
-     if (ollamaKey && typeof ollamaKey === 'string' && ollamaKey.trim() && !ollamaKey.startsWith(':')) {
-       available.push('ollama');
-     }
-     if (openrouterKey && typeof openrouterKey === 'string' && openrouterKey.trim() && !openrouterKey.startsWith(':')) {
-       available.push('openrouter');
-     }
-
-     return available;
-   }
-
-   /**
-    * Parse model string like 'provider/model_name' or just 'model_name'
-    * Only splits on the first '/', since model names can contain '/' characters
-    */
-   parseModel(modelString) {
-     if (!modelString || typeof modelString !== 'string') {
-       return { provider: null, model: null };
-     }
-
-     const firstSlashIndex = modelString.indexOf('/');
-     if (firstSlashIndex !== -1) {
-       return {
-         provider: modelString.substring(0, firstSlashIndex),
-         model: modelString.substring(firstSlashIndex + 1)
-       };
-     }
-
-     return { provider: null, model: modelString };
-   }
-
-   /**
-    * Auto-select provider based on available API keys
-    * Cycles through available providers sequentially
-    */
-   getAutoProvider() {
-     // Check which API keys are available
-     const availableProviders = [];
-
-     if (process.env.OLLAMA_API_KEY) {
-       availableProviders.push('ollama');
-     }
-     if (process.env.OPEN_ROUTER_API_KEY) {
-       availableProviders.push('openrouter');
-     }
-     if (process.env.OPENAI_API_KEY) {
-       availableProviders.push('openai');
-     }
-
-     if (availableProviders.length === 0) {
-       throw new Error('No API keys found. Set OLLAMA_API_KEY, OPEN_ROUTER_API_KEY, or OPENAI_API_KEY environment variables.');
-     }
-
-     // Get next provider in sequence using module-level index
-     const providerName = availableProviders[autoProviderIndex];
-     autoProviderIndex = (autoProviderIndex + 1) % availableProviders.length;
-
-     const provider = this.providers[providerName];
-     const model = provider.defaultModel;
-
-     return { provider, model };
-   }
-
-   /**
-    * Determine which provider to use
-    */
-   getProvider(modelString, options = {}) {
-     const { provider: specifiedProvider, model } = this.parseModel(modelString);
-
-     if (specifiedProvider) {
-       if (!this.providers[specifiedProvider]) {
-         throw new Error(`Unknown provider: ${specifiedProvider}`);
-       }
-       return { provider: this.providers[specifiedProvider], model };
-     }
-
-     // Auto-detect provider
-     const availableProviders = this.getAvailableProviders();
-
-     if (availableProviders.length === 0) {
-       throw new Error('No API keys configured. Set OPENAI_API_KEY, OLLAMA_API_KEY, or OPEN_ROUTER_API_KEY environment variables.');
-     }
-
-     // Use default provider if specified, otherwise use first available
-     const providerName = this.defaultProvider || availableProviders[0];
-     const provider = this.providers[providerName];
-
-     if (!provider) {
-       throw new Error(`Provider ${providerName} is not available`);
-     }
-
-     return { provider, model: model || provider.defaultModel };
-   }
-
-   /**
-    * Validate input parameters
-    */
-   validateInput(input) {
-     if (typeof input === 'string') {
-       // Simple string prompt
-       if (!input.trim()) {
-         throw new Error('Prompt cannot be empty');
-       }
-       return {
-         model: null,
-         messages: [{ role: 'user', content: input }],
-         options: { final: true }
-       };
-     }
-
-     if (typeof input === 'object' && input !== null) {
-       // Object-based API
-       if (!input.messages || !Array.isArray(input.messages)) {
-         throw new Error('messages must be an array');
-       }
-
-       if (input.messages.length === 0) {
-         throw new Error('messages array cannot be empty');
-       }
-
-       // Validate message format
-       for (const msg of input.messages) {
-         if (!msg.role || !msg.content) {
-           throw new Error('Each message must have role and content properties');
-         }
-         if (!['system', 'user', 'assistant'].includes(msg.role)) {
-           throw new Error('Message role must be system, user, or assistant');
-         }
-       }
-
-       return {
-         model: input.model,
-         messages: input.messages,
-         options: {
-           temperature: input.temperature,
-           maxTokens: input.max_tokens || input.maxTokens,
-           topP: input.top_p || input.topP,
-           frequencyPenalty: input.frequency_penalty || input.frequencyPenalty,
-           presencePenalty: input.presence_penalty || input.presencePenalty,
-           stop: input.stop,
-           tools: input.tools,
-           toolChoice: input.tool_choice || input.toolChoice,
-           apiKey: input.apiKey,
-           timeout: input.timeout,
-           final: input.final ?? true
-         }
-       };
-     }
-
-     throw new Error('Input must be a string or object with messages');
-   }
-
-   /**
-    * Main completion function
-    */
-   async completion(input) {
-     try {
-       const { model, messages, options } = this.validateInput(input);
-       const { final, ...providerOptions } = options;
-
-       let provider, finalModel;
-
-       if (model) {
-         // Specific model requested
-         const result = this.getProvider(model, options);
-         provider = result.provider;
-         finalModel = result.model;
-       } else {
-         // No model specified - auto-select provider based on available API keys
-         const autoSelection = this.getAutoProvider();
-         provider = autoSelection.provider;
-         finalModel = autoSelection.model;
-       }
-
-       // Override provider API key if specified in options
-       if (options.apiKey) {
-         provider.apiKey = options.apiKey;
-       }
-
-       // Override timeout if specified
-       if (options.timeout) {
-         provider.timeout = options.timeout;
-       }
-
-       // Log request information
-       const apiKeyPreview = provider.apiKey ? provider.apiKey.substring(0, 10) + '...' : 'none';
-       logger.info('LLMJS2 📤 Sending to LLM provider', {
-         source: 'completion',
-         provider: provider.constructor.name.replace('Provider', '').toLowerCase(),
-         model: finalModel,
-         apiKey: apiKeyPreview,
-         messages: messages
-       });
-
-       const result = await provider.createCompletion(messages, { ...providerOptions, model: finalModel });
-
-       // Log response information
-       logger.info('LLMJS2 📥 Received from LLM provider', {
-         source: 'completion',
-         ...result
-       });
-
-       return final ? result.content : result;
-
-     } catch (error) {
-       // Sanitize error message to avoid leaking sensitive information
-       const message = error.message || 'Unknown error occurred';
-
-       // Don't include API keys or other sensitive data in error messages
-       const sanitizedMessage = message.replace(/Bearer\s+[^\s]+/gi, 'Bearer [REDACTED]');
-
-       throw new Error(`LLMJS2 completion failed: ${sanitizedMessage}`);
-     }
-   }
- }
-
- // Export the completion function directly for convenience
- function completion(input) {
-   const llm = new LLMJS2();
-   return llm.completion(input);
- }
-
- module.exports = {
-   completion,
-   LLMJS2,
-   router,
-   app,
-   OpenAIProvider,
-   OllamaProvider,
-   OpenRouterProvider
- };
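For consumers pinned to 1.3.8, this deleted root entry defined the whole public surface (`completion`, `LLMJS2`, `router`, `app`, and the three provider classes). A minimal usage sketch reconstructed from the code above; the model id is illustrative, not taken from the package:

```javascript
// Usage sketch of the removed 1.3.8 root entry point, based on the
// deleted index.js shown in this diff. Model id is a placeholder.
const { completion, LLMJS2 } = require('llmjs2');

async function main() {
  // A plain string is wrapped by validateInput() as one user message
  // with final: true, so the promise resolves to the reply text alone.
  const text = await completion('Hello there');
  console.log(text);

  // Object input: parseModel() splits 'provider/model' on the first '/'
  // only, and final: false returns the provider's full result object.
  const llm = new LLMJS2({ timeout: 30000 });
  const result = await llm.completion({
    model: 'openai/gpt-3.5-turbo',
    messages: [{ role: 'user', content: 'Hello there' }],
    final: false
  });
  console.log(result.content);
}

main().catch(console.error);
```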