brains-cli 0.1.0 → 1.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +2 -0
- package/README.md +64 -81
- package/bin/brains.js +23 -1
- package/package.json +3 -1
- package/src/api.js +115 -0
- package/src/commands/config.js +236 -0
- package/src/commands/install.js +3 -1
- package/src/commands/run.js +477 -317
- package/src/config.js +237 -0
- package/src/providers/anthropic.js +83 -0
- package/src/providers/openai-compat.js +96 -0
- package/src/providers/registry.js +125 -0
- package/src/utils/files.js +239 -0
package/src/config.js
ADDED
|
@@ -0,0 +1,237 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
const path = require('path');
|
|
4
|
+
const fs = require('fs');
|
|
5
|
+
const os = require('os');
|
|
6
|
+
const { getProviderDef } = require('./providers/registry');
|
|
7
|
+
|
|
8
|
+
// All persistent CLI state lives under ~/.brains
const BRAINS_DIR = path.join(os.homedir(), '.brains');
// Single JSON config file (see loadConfig/saveConfig for the schema)
const CONFIG_FILE = path.join(BRAINS_DIR, 'config.json');
// Saved chat transcripts, one JSON file per session (see saveSession)
const SESSIONS_DIR = path.join(BRAINS_DIR, 'sessions');

// Baseline settings merged under whatever is read from disk; a missing or
// corrupted config file effectively resolves to exactly these values.
const DEFAULTS = {
  provider: 'groq',
  max_tokens: 8096,
  theme: 'dark',
  telemetry: false,
};
|
|
18
|
+
|
|
19
|
+
/**
 * Ensure ~/.brains and ~/.brains/sessions exist.
 *
 * `fs.mkdirSync` with `{ recursive: true }` is a no-op when the directory
 * already exists, so the previous `existsSync` pre-checks were redundant
 * and introduced a check-then-act race; calling mkdirSync directly is
 * both simpler and race-free.
 */
function ensureDirs() {
  fs.mkdirSync(BRAINS_DIR, { recursive: true });
  fs.mkdirSync(SESSIONS_DIR, { recursive: true });
}
|
|
27
|
+
|
|
28
|
+
/**
 * Load the config from disk, migrating legacy formats on the fly.
 * A missing, unreadable, or unparsable config file yields the defaults
 * with an empty providers map.
 */
function loadConfig() {
  ensureDirs();
  try {
    const onDisk = fs.existsSync(CONFIG_FILE);
    if (onDisk) {
      const parsed = JSON.parse(fs.readFileSync(CONFIG_FILE, 'utf-8'));
      return migrateConfig(parsed);
    }
  } catch (err) {
    // Corrupted config file — fall through and start from defaults.
  }
  return { ...DEFAULTS, providers: {} };
}
|
|
40
|
+
|
|
41
|
+
/**
 * Migrate the old config format (flat api_key/model) to the new
 * provider-based format ({ providers: { <name>: { api_key, model } } }).
 *
 * Idempotent: safe to call on already-migrated configs. Never mutates
 * `raw` or its nested objects (the providers map is shallow-copied
 * before being written to).
 *
 * Fix over the original: the stale flat `api_key` was only deleted when
 * `providers.anthropic` did NOT already exist, so a migrated config could
 * carry the plaintext legacy key forever. It is now always removed.
 */
function migrateConfig(raw) {
  const config = { ...DEFAULTS, ...raw };

  // Shallow-copy providers so migration never mutates the caller's object.
  config.providers = { ...(config.providers || {}) };

  // Migrate old flat api_key → providers.anthropic.api_key. If an
  // anthropic entry already exists, keep it — but still drop the stale
  // flat key so it doesn't linger in the config file.
  if (config.api_key) {
    if (!config.providers.anthropic) {
      config.providers.anthropic = {
        api_key: config.api_key,
        model: config.model || 'claude-sonnet-4-20250514',
      };
    }
    delete config.api_key;
  }

  // Migrate old flat model if it was an anthropic model; any other flat
  // model is simply discarded (it belonged to the legacy schema).
  if (config.model && config.model.startsWith('claude-')) {
    config.providers.anthropic = {
      ...(config.providers.anthropic || {}),
      model: config.model,
    };
    delete config.model;
  } else if (config.model) {
    delete config.model;
  }

  // If no provider is set, default to groq.
  if (!config.provider) {
    config.provider = DEFAULTS.provider;
  }

  return config;
}
|
|
77
|
+
|
|
78
|
+
/** Persist the config object to ~/.brains/config.json, pretty-printed. */
function saveConfig(config) {
  ensureDirs();
  const serialized = JSON.stringify(config, null, 2);
  fs.writeFileSync(CONFIG_FILE, serialized);
}

/** Read a single top-level config value (undefined when absent). */
function getConfigValue(key) {
  return loadConfig()[key];
}

/** Set a single top-level config value and persist immediately. */
function setConfigValue(key, value) {
  const config = loadConfig();
  config[key] = value;
  saveConfig(config);
}
|
|
93
|
+
|
|
94
|
+
// ═══════════════════════════════════════════
// PROVIDER-AWARE GETTERS
// ═══════════════════════════════════════════

/**
 * Get the currently active provider name, falling back to the default.
 */
function getActiveProvider() {
  const { provider } = loadConfig();
  return provider || DEFAULTS.provider;
}
|
|
105
|
+
|
|
106
|
+
/**
 * Resolve the API key for a provider.
 * Precedence: environment variable > config file > null.
 */
function getProviderApiKey(providerName) {
  const providerDef = getProviderDef(providerName);

  // 1. The provider's environment variable wins when set and non-empty.
  const envName = providerDef && providerDef.key_env;
  if (envName && process.env[envName]) {
    return process.env[envName];
  }

  // 2. Otherwise fall back to the per-provider entry in the config file.
  const { providers } = loadConfig();
  const entry = providers ? providers[providerName] : undefined;
  return (entry && entry.api_key) || null;
}
|
|
126
|
+
|
|
127
|
+
/**
 * Get the model for a provider: explicit config value, else the
 * provider's registry default, else the literal string 'unknown'.
 */
function getProviderModel(providerName) {
  const { providers } = loadConfig();
  const entry = providers ? providers[providerName] : undefined;
  if (entry && entry.model) {
    return entry.model;
  }

  // Fall back to the registry's default model for this provider.
  const providerDef = getProviderDef(providerName);
  return providerDef ? providerDef.default_model : 'unknown';
}
|
|
141
|
+
|
|
142
|
+
/**
 * Get the base URL for a provider: per-provider config override, else
 * the registry default, else null.
 */
function getProviderBaseUrl(providerName) {
  const { providers } = loadConfig();
  const entry = providers ? providers[providerName] : undefined;
  if (entry && entry.base_url) {
    return entry.base_url;
  }

  const providerDef = getProviderDef(providerName);
  return providerDef ? providerDef.base_url : null;
}
|
|
155
|
+
|
|
156
|
+
/**
 * Save the API key for a specific provider and persist the config.
 */
function setProviderApiKey(providerName, apiKey) {
  const config = loadConfig();
  config.providers = config.providers || {};
  config.providers[providerName] = config.providers[providerName] || {};
  config.providers[providerName].api_key = apiKey;
  saveConfig(config);
}
|
|
166
|
+
|
|
167
|
+
/**
 * Save the model choice for a specific provider and persist the config.
 */
function setProviderModel(providerName, model) {
  const config = loadConfig();
  config.providers = config.providers || {};
  config.providers[providerName] = config.providers[providerName] || {};
  config.providers[providerName].model = model;
  saveConfig(config);
}
|
|
177
|
+
|
|
178
|
+
/**
 * Switch the active provider and persist the choice.
 */
function setActiveProvider(providerName) {
  saveConfig({ ...loadConfig(), provider: providerName });
}
|
|
186
|
+
|
|
187
|
+
// ═══════════════════════════════════════════
// BACKWARD-COMPAT GETTERS (used by run.js session header)
// ═══════════════════════════════════════════

/** Model of the currently active provider. */
function getModel() {
  const active = getActiveProvider();
  return getProviderModel(active);
}

/** API key of the currently active provider (or null). */
function getApiKey() {
  const active = getActiveProvider();
  return getProviderApiKey(active);
}
|
|
198
|
+
|
|
199
|
+
/**
 * Mask an API key for safe display: keep a 7-char prefix and 4-char
 * suffix. Missing or implausibly short keys render as '(not set)'.
 */
function maskApiKey(key) {
  const MIN_LEN = 12;
  if (!key || key.length < MIN_LEN) {
    return '(not set)';
  }
  const head = key.slice(0, 7);
  const tail = key.slice(-4);
  return `${head}...${tail}`;
}
|
|
203
|
+
|
|
204
|
+
/**
 * Persist a chat session to ~/.brains/sessions/<brainId>-<timestamp>.json.
 *
 * Fix over the original: the clock was sampled twice (once for the
 * filename, once for the recorded `timestamp` field), so the two could
 * disagree. A single ISO timestamp is now used for both.
 *
 * @param {string} brainId  - Identifier embedded in the filename.
 * @param {Array}  messages - Conversation messages to persist.
 * @returns {string} Absolute path of the file written.
 */
function saveSession(brainId, messages) {
  ensureDirs();
  const now = new Date().toISOString();
  // ':' and '.' are not filename-safe everywhere (notably Windows).
  const fileStamp = now.replace(/[:.]/g, '-');
  const sessionFile = path.join(SESSIONS_DIR, `${brainId}-${fileStamp}.json`);
  fs.writeFileSync(sessionFile, JSON.stringify({
    brainId,
    timestamp: now,
    messages,
  }, null, 2));
  return sessionFile;
}
|
|
215
|
+
|
|
216
|
+
// Public API: path constants, raw config I/O, provider-aware getters and
// setters, plus legacy getApiKey/getModel shims and session persistence.
module.exports = {
  BRAINS_DIR,
  CONFIG_FILE,
  SESSIONS_DIR,
  DEFAULTS,
  loadConfig,
  saveConfig,
  getConfigValue,
  setConfigValue,
  getActiveProvider,
  getProviderApiKey,
  getProviderModel,
  getProviderBaseUrl,
  setProviderApiKey,
  setProviderModel,
  setActiveProvider,
  getApiKey,
  getModel,
  maskApiKey,
  saveSession,
  ensureDirs,
};
|
|
@@ -0,0 +1,83 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
const Anthropic = require('@anthropic-ai/sdk');
|
|
4
|
+
|
|
5
|
+
// Lazily-constructed Anthropic client, cached for as long as the same
// API key keeps being used.
let client = null;
let cachedKey = null;

/**
 * Return a cached Anthropic client, rebuilding it when the key changes.
 */
function getClient(apiKey) {
  const reusable = client !== null && cachedKey === apiKey;
  if (!reusable) {
    client = new Anthropic({ apiKey });
    cachedKey = apiKey;
  }
  return client;
}
|
|
15
|
+
|
|
16
|
+
/**
 * Stream a message using Anthropic's native API.
 * The system prompt is a separate `system` parameter (not a message).
 *
 * Fix over the original: `onEnd()` was only reached when the stream
 * completed cleanly, so a mid-stream error left the caller's end-of-
 * stream cleanup (spinner stop, cursor restore) never running. It is
 * now invoked via try/finally before the error propagates.
 *
 * @param {object}   opts
 * @param {string}   opts.apiKey       - Anthropic API key.
 * @param {string}   opts.systemPrompt - System prompt text.
 * @param {Array}    opts.messages     - Anthropic-format message list.
 * @param {number}   opts.maxTokens    - max_tokens for the request.
 * @param {string}   opts.model        - Model identifier.
 * @param {Function} opts.onText       - Called with each text delta.
 * @param {Function} opts.onStart      - Called once streaming begins.
 * @param {Function} opts.onEnd        - Called when streaming finishes,
 *                                       including on error.
 * @returns {Promise<string>} The full concatenated response text.
 */
async function streamMessage(opts) {
  const {
    apiKey,
    systemPrompt,
    messages,
    maxTokens,
    model,
    onText,
    onStart,
    onEnd,
  } = opts;

  const anthropic = getClient(apiKey);
  let fullText = '';

  const stream = await anthropic.messages.stream({
    model,
    max_tokens: maxTokens,
    system: systemPrompt,
    messages,
  });

  onStart();

  try {
    for await (const event of stream) {
      if (event.type === 'content_block_delta' && event.delta.type === 'text_delta') {
        const text = event.delta.text;
        fullText += text;
        onText(text);
      }
    }
  } finally {
    // Guarantee onEnd even if the stream errors mid-flight, so callers
    // can always stop spinners / restore the cursor first.
    onEnd();
  }

  return fullText;
}
|
|
55
|
+
|
|
56
|
+
/**
 * Send a single message (non-streaming) and return the text content.
 */
async function sendMessage(opts) {
  const { apiKey, systemPrompt, messages, maxTokens, model } = opts;

  const anthropic = getClient(apiKey);

  const response = await anthropic.messages.create({
    model,
    max_tokens: maxTokens,
    system: systemPrompt,
    messages,
  });

  // Concatenate only the text blocks of the response content.
  let result = '';
  for (const block of response.content) {
    if (block.type === 'text') {
      result += block.text;
    }
  }
  return result;
}

module.exports = { streamMessage, sendMessage };
|
|
@@ -0,0 +1,96 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
const OpenAI = require('openai');
|
|
4
|
+
|
|
5
|
+
// One OpenAI client per (baseURL, apiKey) pair, created on demand.
const clients = {};

/**
 * Return a cached OpenAI-compatible client for the given endpoint/key.
 */
function getClient(baseURL, apiKey) {
  const cacheKey = `${baseURL}::${apiKey || 'nokey'}`;
  if (!clients[cacheKey]) {
    clients[cacheKey] = new OpenAI({
      apiKey: apiKey || 'ollama', // Ollama doesn't need a key but SDK requires one
      baseURL,
    });
  }
  return clients[cacheKey];
}
|
|
18
|
+
|
|
19
|
+
/**
 * Stream a message using an OpenAI-compatible API (Groq, Gemini,
 * OpenRouter, Ollama). The system prompt becomes a leading system
 * message, matching the Chat Completions convention.
 *
 * Fix over the original: `onEnd()` was only reached when the stream
 * completed cleanly, so a mid-stream error left the caller's end-of-
 * stream cleanup (spinner stop, cursor restore) never running. It is
 * now invoked via try/finally before the error propagates.
 *
 * @param {object}   opts
 * @param {string}   opts.apiKey       - Provider API key (may be empty for Ollama).
 * @param {string}   opts.baseURL      - OpenAI-compatible endpoint.
 * @param {string}   opts.systemPrompt - System prompt text.
 * @param {Array}    opts.messages     - Chat messages (role/content).
 * @param {number}   opts.maxTokens    - max_tokens for the request.
 * @param {string}   opts.model        - Model identifier.
 * @param {Function} opts.onText       - Called with each content delta.
 * @param {Function} opts.onStart      - Called once streaming begins.
 * @param {Function} opts.onEnd        - Called when streaming finishes,
 *                                       including on error.
 * @returns {Promise<string>} The full concatenated response text.
 */
async function streamMessage(opts) {
  const {
    apiKey,
    baseURL,
    systemPrompt,
    messages,
    maxTokens,
    model,
    onText,
    onStart,
    onEnd,
  } = opts;

  const openai = getClient(baseURL, apiKey);

  // Convert Anthropic-style (separate system) to OpenAI-style (system message)
  const fullMessages = [
    { role: 'system', content: systemPrompt },
    ...messages,
  ];

  let fullText = '';

  const stream = await openai.chat.completions.create({
    model,
    max_tokens: maxTokens,
    messages: fullMessages,
    stream: true,
  });

  onStart();

  try {
    for await (const chunk of stream) {
      const text = chunk.choices[0]?.delta?.content;
      if (text) {
        fullText += text;
        onText(text);
      }
    }
  } finally {
    // Guarantee onEnd even if the stream errors mid-flight, so callers
    // can always stop spinners / restore the cursor first.
    onEnd();
  }

  return fullText;
}
|
|
66
|
+
|
|
67
|
+
/**
 * Send a single message (non-streaming) and return the response text.
 */
async function sendMessage(opts) {
  const { apiKey, baseURL, systemPrompt, messages, maxTokens, model } = opts;

  const openai = getClient(baseURL, apiKey);

  // System prompt rides along as the first chat message.
  const payload = {
    model,
    max_tokens: maxTokens,
    messages: [{ role: 'system', content: systemPrompt }, ...messages],
  };

  const response = await openai.chat.completions.create(payload);
  const choice = response.choices[0];
  return choice?.message?.content || '';
}

module.exports = { streamMessage, sendMessage };
|
|
@@ -0,0 +1,125 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
/**
 * Provider Registry — defines all supported AI providers.
 *
 * Each provider has:
 *   type          — 'anthropic' or 'openai' (OpenAI-compatible API)
 *   name          — Display name
 *   base_url      — API endpoint (for openai-type providers)
 *   default_model — Fallback model
 *   models        — Available models
 *   key_prefix    — Expected API key prefix (for validation hint)
 *   key_env       — Environment variable to check for API key
 *   key_url       — Where to get an API key
 *   requires_key  — false for providers that don't need a key (Ollama)
 *   free          — true if provider has a free tier
 */
const PROVIDERS = {
  groq: {
    name: 'Groq',
    type: 'openai',
    base_url: 'https://api.groq.com/openai/v1',
    default_model: 'llama-3.3-70b-versatile',
    models: [
      'llama-3.3-70b-versatile',
      'llama-3.1-8b-instant',
      'mixtral-8x7b-32768',
      'gemma2-9b-it',
    ],
    key_prefix: 'gsk_',
    key_env: 'GROQ_API_KEY',
    key_url: 'https://console.groq.com/',
    requires_key: true,
    free: true,
  },

  anthropic: {
    name: 'Anthropic (Claude)',
    type: 'anthropic',
    base_url: null,
    default_model: 'claude-sonnet-4-20250514',
    models: [
      'claude-sonnet-4-20250514',
      'claude-haiku-4-20250514',
    ],
    key_prefix: 'sk-ant-',
    key_env: 'ANTHROPIC_API_KEY',
    key_url: 'https://console.anthropic.com/',
    requires_key: true,
    free: false,
  },

  gemini: {
    name: 'Google Gemini',
    type: 'openai',
    base_url: 'https://generativelanguage.googleapis.com/v1beta/openai/',
    default_model: 'gemini-2.0-flash',
    models: [
      'gemini-2.0-flash',
      'gemini-1.5-flash',
      'gemini-1.5-pro',
    ],
    key_prefix: '',
    key_env: 'GEMINI_API_KEY',
    key_url: 'https://aistudio.google.com/',
    requires_key: true,
    free: true,
  },

  openrouter: {
    name: 'OpenRouter',
    type: 'openai',
    base_url: 'https://openrouter.ai/api/v1',
    default_model: 'meta-llama/llama-3.3-70b-instruct:free',
    models: [
      'meta-llama/llama-3.3-70b-instruct:free',
      'mistralai/mistral-7b-instruct:free',
      'google/gemma-2-9b-it:free',
    ],
    key_prefix: 'sk-or-',
    key_env: 'OPENROUTER_API_KEY',
    key_url: 'https://openrouter.ai/',
    requires_key: true,
    free: true,
  },

  ollama: {
    name: 'Ollama (Local)',
    type: 'openai',
    base_url: 'http://localhost:11434/v1',
    default_model: 'llama3',
    models: [
      'llama3',
      'llama3.1',
      'codellama',
      'mistral',
      'mixtral',
      'phi3',
    ],
    key_prefix: '',
    key_env: '',
    key_url: 'https://ollama.com/',
    requires_key: false,
    free: true,
  },
};

/**
 * Look up a provider definition by name; null when unknown.
 *
 * Fix over the original: `PROVIDERS[name] || null` leaked inherited
 * Object.prototype members — e.g. getProviderDef('constructor') or
 * ('toString') returned a truthy function instead of null. An
 * own-property guard closes that hole.
 */
function getProviderDef(name) {
  if (Object.prototype.hasOwnProperty.call(PROVIDERS, name)) {
    return PROVIDERS[name];
  }
  return null;
}

/** Names of all registered providers, in registry order. */
function getAllProviderNames() {
  return Object.keys(PROVIDERS);
}

/** The full registry object. */
function getAllProviders() {
  return PROVIDERS;
}
|
|
119
|
+
|
|
120
|
+
// Public API: the raw registry table plus lookup helpers.
module.exports = {
  PROVIDERS,
  getProviderDef,
  getAllProviderNames,
  getAllProviders,
};
|