hedgequantx 2.4.44 → 2.5.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/package.json +1 -1
- package/src/app.js +5 -0
- package/src/menus/ai-agent.js +406 -0
- package/src/menus/dashboard.js +17 -5
- package/src/services/ai/index.js +371 -0
- package/src/services/ai/providers/index.js +490 -0
package/README.md
CHANGED
|
@@ -208,7 +208,7 @@ Mirror trades from Lead to Follower accounts.
|
|
|
208
208
|
<div align="center">
|
|
209
209
|
|
|
210
210
|
[](https://discord.gg/UBKCERctZu)
|
|
211
|
-
[](https://github.com/HedgeQuantX/
|
|
211
|
+
[](https://github.com/HedgeQuantX/HedgeQuantX/issues)
|
|
212
212
|
|
|
213
213
|
</div>
|
|
214
214
|
|
package/package.json
CHANGED
package/src/app.js
CHANGED
|
@@ -27,6 +27,7 @@ const {
|
|
|
27
27
|
dashboardMenu,
|
|
28
28
|
handleUpdate,
|
|
29
29
|
} = require('./menus');
|
|
30
|
+
const { aiAgentMenu } = require('./menus/ai-agent');
|
|
30
31
|
|
|
31
32
|
/** @type {Object|null} */
|
|
32
33
|
let currentService = null;
|
|
@@ -336,6 +337,10 @@ const run = async () => {
|
|
|
336
337
|
}
|
|
337
338
|
break;
|
|
338
339
|
|
|
340
|
+
case 'ai_agent':
|
|
341
|
+
await aiAgentMenu();
|
|
342
|
+
break;
|
|
343
|
+
|
|
339
344
|
case 'update':
|
|
340
345
|
await handleUpdate();
|
|
341
346
|
break;
|
|
@@ -0,0 +1,406 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* AI Agent Menu
|
|
3
|
+
* Configure AI provider connection
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
const chalk = require('chalk');
|
|
7
|
+
const ora = require('ora');
|
|
8
|
+
|
|
9
|
+
const { getLogoWidth, drawBoxHeader, drawBoxFooter } = require('../ui');
|
|
10
|
+
const { prompts } = require('../utils');
|
|
11
|
+
const aiService = require('../services/ai');
|
|
12
|
+
const { getCategories, getProvidersByCategory } = require('../services/ai/providers');
|
|
13
|
+
|
|
14
|
+
/**
|
|
15
|
+
* Main AI Agent menu
|
|
16
|
+
*/
|
|
17
|
+
const aiAgentMenu = async () => {
|
|
18
|
+
const boxWidth = getLogoWidth();
|
|
19
|
+
const W = boxWidth - 2;
|
|
20
|
+
|
|
21
|
+
const makeLine = (content, align = 'left') => {
|
|
22
|
+
const plainLen = content.replace(/\x1b\[[0-9;]*m/g, '').length;
|
|
23
|
+
const padding = W - plainLen;
|
|
24
|
+
if (align === 'center') {
|
|
25
|
+
const leftPad = Math.floor(padding / 2);
|
|
26
|
+
return chalk.cyan('║') + ' '.repeat(leftPad) + content + ' '.repeat(padding - leftPad) + chalk.cyan('║');
|
|
27
|
+
}
|
|
28
|
+
return chalk.cyan('║') + ' ' + content + ' '.repeat(Math.max(0, padding - 1)) + chalk.cyan('║');
|
|
29
|
+
};
|
|
30
|
+
|
|
31
|
+
console.clear();
|
|
32
|
+
drawBoxHeader('AI AGENT', boxWidth);
|
|
33
|
+
|
|
34
|
+
// Show current status
|
|
35
|
+
const connection = aiService.getConnection();
|
|
36
|
+
|
|
37
|
+
if (connection) {
|
|
38
|
+
console.log(makeLine(chalk.green('STATUS: ● CONNECTED'), 'left'));
|
|
39
|
+
console.log(makeLine(chalk.white(`PROVIDER: ${connection.provider.name}`), 'left'));
|
|
40
|
+
console.log(makeLine(chalk.white(`MODEL: ${connection.model}`), 'left'));
|
|
41
|
+
} else {
|
|
42
|
+
console.log(makeLine(chalk.gray('STATUS: ○ NOT CONNECTED'), 'left'));
|
|
43
|
+
}
|
|
44
|
+
|
|
45
|
+
console.log(chalk.cyan('╠' + '═'.repeat(W) + '╣'));
|
|
46
|
+
|
|
47
|
+
// Menu options
|
|
48
|
+
const options = [];
|
|
49
|
+
|
|
50
|
+
if (!connection) {
|
|
51
|
+
options.push({ label: chalk.green('[1] CONNECT PROVIDER'), value: 'connect' });
|
|
52
|
+
} else {
|
|
53
|
+
options.push({ label: chalk.cyan('[1] CHANGE PROVIDER'), value: 'connect' });
|
|
54
|
+
options.push({ label: chalk.yellow('[2] CHANGE MODEL'), value: 'model' });
|
|
55
|
+
options.push({ label: chalk.red('[3] DISCONNECT'), value: 'disconnect' });
|
|
56
|
+
}
|
|
57
|
+
options.push({ label: chalk.gray('[<] BACK'), value: 'back' });
|
|
58
|
+
|
|
59
|
+
for (const opt of options) {
|
|
60
|
+
console.log(makeLine(opt.label, 'left'));
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
drawBoxFooter(boxWidth);
|
|
64
|
+
|
|
65
|
+
const choice = await prompts.textInput(chalk.cyan('SELECT:'));
|
|
66
|
+
|
|
67
|
+
switch (choice?.toLowerCase()) {
|
|
68
|
+
case '1':
|
|
69
|
+
return await selectCategory();
|
|
70
|
+
case '2':
|
|
71
|
+
if (connection) {
|
|
72
|
+
return await selectModel(connection.provider);
|
|
73
|
+
}
|
|
74
|
+
return;
|
|
75
|
+
case '3':
|
|
76
|
+
if (connection) {
|
|
77
|
+
aiService.disconnect();
|
|
78
|
+
console.log(chalk.yellow('\n AI AGENT DISCONNECTED'));
|
|
79
|
+
await prompts.waitForEnter();
|
|
80
|
+
}
|
|
81
|
+
return;
|
|
82
|
+
case '<':
|
|
83
|
+
case 'b':
|
|
84
|
+
return;
|
|
85
|
+
default:
|
|
86
|
+
return;
|
|
87
|
+
}
|
|
88
|
+
};
|
|
89
|
+
|
|
90
|
+
/**
|
|
91
|
+
* Select provider category
|
|
92
|
+
*/
|
|
93
|
+
const selectCategory = async () => {
|
|
94
|
+
const boxWidth = getLogoWidth();
|
|
95
|
+
const W = boxWidth - 2;
|
|
96
|
+
|
|
97
|
+
const makeLine = (content) => {
|
|
98
|
+
const plainLen = content.replace(/\x1b\[[0-9;]*m/g, '').length;
|
|
99
|
+
const padding = W - plainLen;
|
|
100
|
+
return chalk.cyan('║') + ' ' + content + ' '.repeat(Math.max(0, padding - 1)) + chalk.cyan('║');
|
|
101
|
+
};
|
|
102
|
+
|
|
103
|
+
console.clear();
|
|
104
|
+
drawBoxHeader('SELECT PROVIDER TYPE', boxWidth);
|
|
105
|
+
|
|
106
|
+
const categories = getCategories();
|
|
107
|
+
|
|
108
|
+
categories.forEach((cat, index) => {
|
|
109
|
+
const color = cat.id === 'unified' ? chalk.green :
|
|
110
|
+
cat.id === 'local' ? chalk.yellow : chalk.cyan;
|
|
111
|
+
console.log(makeLine(color(`[${index + 1}] ${cat.name}`)));
|
|
112
|
+
console.log(makeLine(chalk.gray(' ' + cat.description)));
|
|
113
|
+
console.log(makeLine(''));
|
|
114
|
+
});
|
|
115
|
+
|
|
116
|
+
console.log(makeLine(chalk.gray('[<] BACK')));
|
|
117
|
+
|
|
118
|
+
drawBoxFooter(boxWidth);
|
|
119
|
+
|
|
120
|
+
const choice = await prompts.textInput(chalk.cyan('SELECT (1-4):'));
|
|
121
|
+
|
|
122
|
+
if (choice === '<' || choice?.toLowerCase() === 'b') {
|
|
123
|
+
return await aiAgentMenu();
|
|
124
|
+
}
|
|
125
|
+
|
|
126
|
+
const index = parseInt(choice) - 1;
|
|
127
|
+
if (isNaN(index) || index < 0 || index >= categories.length) {
|
|
128
|
+
return await aiAgentMenu();
|
|
129
|
+
}
|
|
130
|
+
|
|
131
|
+
const selectedCategory = categories[index];
|
|
132
|
+
return await selectProvider(selectedCategory.id);
|
|
133
|
+
};
|
|
134
|
+
|
|
135
|
+
/**
|
|
136
|
+
* Select AI provider from category
|
|
137
|
+
*/
|
|
138
|
+
const selectProvider = async (categoryId) => {
|
|
139
|
+
const boxWidth = getLogoWidth();
|
|
140
|
+
const W = boxWidth - 2;
|
|
141
|
+
|
|
142
|
+
const makeLine = (content) => {
|
|
143
|
+
const plainLen = content.replace(/\x1b\[[0-9;]*m/g, '').length;
|
|
144
|
+
const padding = W - plainLen;
|
|
145
|
+
return chalk.cyan('║') + ' ' + content + ' '.repeat(Math.max(0, padding - 1)) + chalk.cyan('║');
|
|
146
|
+
};
|
|
147
|
+
|
|
148
|
+
console.clear();
|
|
149
|
+
|
|
150
|
+
const categories = getCategories();
|
|
151
|
+
const category = categories.find(c => c.id === categoryId);
|
|
152
|
+
drawBoxHeader(category.name, boxWidth);
|
|
153
|
+
|
|
154
|
+
const providers = getProvidersByCategory(categoryId);
|
|
155
|
+
|
|
156
|
+
if (providers.length === 0) {
|
|
157
|
+
console.log(makeLine(chalk.gray('No providers in this category')));
|
|
158
|
+
drawBoxFooter(boxWidth);
|
|
159
|
+
await prompts.waitForEnter();
|
|
160
|
+
return await selectCategory();
|
|
161
|
+
}
|
|
162
|
+
|
|
163
|
+
// Display providers
|
|
164
|
+
providers.forEach((provider, index) => {
|
|
165
|
+
const isRecommended = provider.id === 'openrouter';
|
|
166
|
+
const color = isRecommended ? chalk.green : chalk.cyan;
|
|
167
|
+
console.log(makeLine(color(`[${index + 1}] ${provider.name}`)));
|
|
168
|
+
console.log(makeLine(chalk.gray(' ' + provider.description)));
|
|
169
|
+
if (provider.models && provider.models.length > 0) {
|
|
170
|
+
const modelList = provider.models.slice(0, 3).join(', ');
|
|
171
|
+
console.log(makeLine(chalk.gray(' Models: ' + modelList + (provider.models.length > 3 ? '...' : ''))));
|
|
172
|
+
}
|
|
173
|
+
console.log(makeLine(''));
|
|
174
|
+
});
|
|
175
|
+
|
|
176
|
+
console.log(makeLine(chalk.gray('[<] BACK')));
|
|
177
|
+
|
|
178
|
+
drawBoxFooter(boxWidth);
|
|
179
|
+
|
|
180
|
+
const choice = await prompts.textInput(chalk.cyan('SELECT PROVIDER:'));
|
|
181
|
+
|
|
182
|
+
if (choice === '<' || choice?.toLowerCase() === 'b') {
|
|
183
|
+
return await selectCategory();
|
|
184
|
+
}
|
|
185
|
+
|
|
186
|
+
const index = parseInt(choice) - 1;
|
|
187
|
+
if (isNaN(index) || index < 0 || index >= providers.length) {
|
|
188
|
+
return await selectCategory();
|
|
189
|
+
}
|
|
190
|
+
|
|
191
|
+
const selectedProvider = providers[index];
|
|
192
|
+
return await selectProviderOption(selectedProvider);
|
|
193
|
+
};
|
|
194
|
+
|
|
195
|
+
/**
|
|
196
|
+
* Select connection option for provider
|
|
197
|
+
*/
|
|
198
|
+
const selectProviderOption = async (provider) => {
|
|
199
|
+
const boxWidth = getLogoWidth();
|
|
200
|
+
const W = boxWidth - 2;
|
|
201
|
+
|
|
202
|
+
const makeLine = (content) => {
|
|
203
|
+
const plainLen = content.replace(/\x1b\[[0-9;]*m/g, '').length;
|
|
204
|
+
const padding = W - plainLen;
|
|
205
|
+
return chalk.cyan('║') + ' ' + content + ' '.repeat(Math.max(0, padding - 1)) + chalk.cyan('║');
|
|
206
|
+
};
|
|
207
|
+
|
|
208
|
+
// If only one option, skip selection
|
|
209
|
+
if (provider.options.length === 1) {
|
|
210
|
+
return await setupConnection(provider, provider.options[0]);
|
|
211
|
+
}
|
|
212
|
+
|
|
213
|
+
console.clear();
|
|
214
|
+
drawBoxHeader(provider.name, boxWidth);
|
|
215
|
+
|
|
216
|
+
console.log(makeLine(chalk.white('SELECT CONNECTION METHOD:')));
|
|
217
|
+
console.log(makeLine(''));
|
|
218
|
+
|
|
219
|
+
provider.options.forEach((option, index) => {
|
|
220
|
+
console.log(makeLine(chalk.cyan(`[${index + 1}] ${option.label}`)));
|
|
221
|
+
option.description.forEach(desc => {
|
|
222
|
+
console.log(makeLine(chalk.gray(' ' + desc)));
|
|
223
|
+
});
|
|
224
|
+
console.log(makeLine(''));
|
|
225
|
+
});
|
|
226
|
+
|
|
227
|
+
console.log(makeLine(chalk.gray('[<] BACK')));
|
|
228
|
+
|
|
229
|
+
drawBoxFooter(boxWidth);
|
|
230
|
+
|
|
231
|
+
const choice = await prompts.textInput(chalk.cyan('SELECT:'));
|
|
232
|
+
|
|
233
|
+
if (choice === '<' || choice?.toLowerCase() === 'b') {
|
|
234
|
+
return await selectProvider(provider.category);
|
|
235
|
+
}
|
|
236
|
+
|
|
237
|
+
const index = parseInt(choice) - 1;
|
|
238
|
+
if (isNaN(index) || index < 0 || index >= provider.options.length) {
|
|
239
|
+
return await selectProvider(provider.category);
|
|
240
|
+
}
|
|
241
|
+
|
|
242
|
+
const selectedOption = provider.options[index];
|
|
243
|
+
return await setupConnection(provider, selectedOption);
|
|
244
|
+
};
|
|
245
|
+
|
|
246
|
+
/**
|
|
247
|
+
* Setup connection with credentials
|
|
248
|
+
*/
|
|
249
|
+
const setupConnection = async (provider, option) => {
|
|
250
|
+
const boxWidth = getLogoWidth();
|
|
251
|
+
const W = boxWidth - 2;
|
|
252
|
+
|
|
253
|
+
const makeLine = (content) => {
|
|
254
|
+
const plainLen = content.replace(/\x1b\[[0-9;]*m/g, '').length;
|
|
255
|
+
const padding = W - plainLen;
|
|
256
|
+
return chalk.cyan('║') + ' ' + content + ' '.repeat(Math.max(0, padding - 1)) + chalk.cyan('║');
|
|
257
|
+
};
|
|
258
|
+
|
|
259
|
+
console.clear();
|
|
260
|
+
drawBoxHeader(`CONNECT TO ${provider.name}`, boxWidth);
|
|
261
|
+
|
|
262
|
+
// Show instructions
|
|
263
|
+
if (option.url) {
|
|
264
|
+
console.log(makeLine(chalk.white('GET YOUR CREDENTIALS:')));
|
|
265
|
+
console.log(makeLine(chalk.cyan(option.url)));
|
|
266
|
+
console.log(makeLine(''));
|
|
267
|
+
}
|
|
268
|
+
|
|
269
|
+
drawBoxFooter(boxWidth);
|
|
270
|
+
console.log();
|
|
271
|
+
|
|
272
|
+
// Collect credentials based on fields
|
|
273
|
+
const credentials = {};
|
|
274
|
+
|
|
275
|
+
for (const field of option.fields) {
|
|
276
|
+
let value;
|
|
277
|
+
|
|
278
|
+
switch (field) {
|
|
279
|
+
case 'apiKey':
|
|
280
|
+
value = await prompts.passwordInput('ENTER API KEY:');
|
|
281
|
+
if (!value) return await selectProviderOption(provider);
|
|
282
|
+
credentials.apiKey = value;
|
|
283
|
+
break;
|
|
284
|
+
|
|
285
|
+
case 'sessionKey':
|
|
286
|
+
value = await prompts.passwordInput('ENTER SESSION KEY:');
|
|
287
|
+
if (!value) return await selectProviderOption(provider);
|
|
288
|
+
credentials.sessionKey = value;
|
|
289
|
+
break;
|
|
290
|
+
|
|
291
|
+
case 'accessToken':
|
|
292
|
+
value = await prompts.passwordInput('ENTER ACCESS TOKEN:');
|
|
293
|
+
if (!value) return await selectProviderOption(provider);
|
|
294
|
+
credentials.accessToken = value;
|
|
295
|
+
break;
|
|
296
|
+
|
|
297
|
+
case 'endpoint':
|
|
298
|
+
const defaultEndpoint = option.defaultEndpoint || '';
|
|
299
|
+
value = await prompts.textInput(`ENDPOINT [${defaultEndpoint || 'required'}]:`);
|
|
300
|
+
credentials.endpoint = value || defaultEndpoint;
|
|
301
|
+
if (!credentials.endpoint) return await selectProviderOption(provider);
|
|
302
|
+
break;
|
|
303
|
+
|
|
304
|
+
case 'model':
|
|
305
|
+
value = await prompts.textInput('MODEL NAME:');
|
|
306
|
+
if (!value) return await selectProviderOption(provider);
|
|
307
|
+
credentials.model = value;
|
|
308
|
+
break;
|
|
309
|
+
}
|
|
310
|
+
}
|
|
311
|
+
|
|
312
|
+
// Validate connection
|
|
313
|
+
const spinner = ora({ text: 'VALIDATING CONNECTION...', color: 'cyan' }).start();
|
|
314
|
+
|
|
315
|
+
const validation = await aiService.validateConnection(provider.id, option.id, credentials);
|
|
316
|
+
|
|
317
|
+
if (!validation.valid) {
|
|
318
|
+
spinner.fail(`CONNECTION FAILED: ${validation.error}`);
|
|
319
|
+
await prompts.waitForEnter();
|
|
320
|
+
return await selectProviderOption(provider);
|
|
321
|
+
}
|
|
322
|
+
|
|
323
|
+
// Save connection
|
|
324
|
+
try {
|
|
325
|
+
const model = credentials.model || provider.defaultModel;
|
|
326
|
+
await aiService.connect(provider.id, option.id, credentials, model);
|
|
327
|
+
spinner.succeed(`CONNECTED TO ${provider.name}`);
|
|
328
|
+
|
|
329
|
+
// Show available models for local providers
|
|
330
|
+
if (validation.models && validation.models.length > 0) {
|
|
331
|
+
console.log(chalk.gray(` AVAILABLE MODELS: ${validation.models.slice(0, 5).join(', ')}`));
|
|
332
|
+
}
|
|
333
|
+
|
|
334
|
+
console.log(chalk.gray(` USING MODEL: ${model}`));
|
|
335
|
+
} catch (error) {
|
|
336
|
+
spinner.fail(`FAILED TO SAVE: ${error.message}`);
|
|
337
|
+
}
|
|
338
|
+
|
|
339
|
+
await prompts.waitForEnter();
|
|
340
|
+
return await aiAgentMenu();
|
|
341
|
+
};
|
|
342
|
+
|
|
343
|
+
/**
|
|
344
|
+
* Select/change model for current provider
|
|
345
|
+
*/
|
|
346
|
+
const selectModel = async (provider) => {
|
|
347
|
+
const boxWidth = getLogoWidth();
|
|
348
|
+
const W = boxWidth - 2;
|
|
349
|
+
|
|
350
|
+
const makeLine = (content) => {
|
|
351
|
+
const plainLen = content.replace(/\x1b\[[0-9;]*m/g, '').length;
|
|
352
|
+
const padding = W - plainLen;
|
|
353
|
+
return chalk.cyan('║') + ' ' + content + ' '.repeat(Math.max(0, padding - 1)) + chalk.cyan('║');
|
|
354
|
+
};
|
|
355
|
+
|
|
356
|
+
console.clear();
|
|
357
|
+
drawBoxHeader('SELECT MODEL', boxWidth);
|
|
358
|
+
|
|
359
|
+
const models = provider.models || [];
|
|
360
|
+
|
|
361
|
+
if (models.length === 0) {
|
|
362
|
+
console.log(makeLine(chalk.gray('No predefined models. Enter model name manually.')));
|
|
363
|
+
drawBoxFooter(boxWidth);
|
|
364
|
+
|
|
365
|
+
const model = await prompts.textInput('ENTER MODEL NAME:');
|
|
366
|
+
if (model) {
|
|
367
|
+
const settings = aiService.getAISettings();
|
|
368
|
+
settings.model = model;
|
|
369
|
+
aiService.saveAISettings(settings);
|
|
370
|
+
console.log(chalk.green(`\n MODEL CHANGED TO: ${model}`));
|
|
371
|
+
}
|
|
372
|
+
await prompts.waitForEnter();
|
|
373
|
+
return await aiAgentMenu();
|
|
374
|
+
}
|
|
375
|
+
|
|
376
|
+
models.forEach((model, index) => {
|
|
377
|
+
console.log(makeLine(chalk.cyan(`[${index + 1}] ${model}`)));
|
|
378
|
+
});
|
|
379
|
+
|
|
380
|
+
console.log(makeLine(''));
|
|
381
|
+
console.log(makeLine(chalk.gray('[<] BACK')));
|
|
382
|
+
|
|
383
|
+
drawBoxFooter(boxWidth);
|
|
384
|
+
|
|
385
|
+
const choice = await prompts.textInput(chalk.cyan('SELECT MODEL:'));
|
|
386
|
+
|
|
387
|
+
if (choice === '<' || choice?.toLowerCase() === 'b') {
|
|
388
|
+
return await aiAgentMenu();
|
|
389
|
+
}
|
|
390
|
+
|
|
391
|
+
const index = parseInt(choice) - 1;
|
|
392
|
+
if (isNaN(index) || index < 0 || index >= models.length) {
|
|
393
|
+
return await aiAgentMenu();
|
|
394
|
+
}
|
|
395
|
+
|
|
396
|
+
const selectedModel = models[index];
|
|
397
|
+
const settings = aiService.getAISettings();
|
|
398
|
+
settings.model = selectedModel;
|
|
399
|
+
aiService.saveAISettings(settings);
|
|
400
|
+
|
|
401
|
+
console.log(chalk.green(`\n MODEL CHANGED TO: ${selectedModel}`));
|
|
402
|
+
await prompts.waitForEnter();
|
|
403
|
+
return await aiAgentMenu();
|
|
404
|
+
};
|
|
405
|
+
|
|
406
|
+
module.exports = { aiAgentMenu };
|
package/src/menus/dashboard.js
CHANGED
|
@@ -10,6 +10,7 @@ const { connections } = require('../services');
|
|
|
10
10
|
const { getLogoWidth, centerText, prepareStdin } = require('../ui');
|
|
11
11
|
const { getCachedStats } = require('../services/stats-cache');
|
|
12
12
|
const { prompts } = require('../utils');
|
|
13
|
+
const aiService = require('../services/ai');
|
|
13
14
|
|
|
14
15
|
/**
|
|
15
16
|
* Dashboard menu after login
|
|
@@ -60,9 +61,18 @@ const dashboardMenu = async (service) => {
|
|
|
60
61
|
pnlDisplay = '--';
|
|
61
62
|
}
|
|
62
63
|
|
|
64
|
+
// AI status
|
|
65
|
+
const aiConnection = aiService.getConnection();
|
|
66
|
+
const aiStatus = aiConnection
|
|
67
|
+
? aiConnection.provider.name.split(' ')[0] // Just "CLAUDE" or "OPENAI"
|
|
68
|
+
: null;
|
|
69
|
+
|
|
63
70
|
// Yellow icons: ✔ for each stat
|
|
64
71
|
const icon = chalk.yellow('✔ ');
|
|
65
|
-
const
|
|
72
|
+
const aiIcon = aiStatus ? chalk.magenta('✔ ') : chalk.gray('○ ');
|
|
73
|
+
const aiText = aiStatus ? chalk.magenta(aiStatus) : chalk.gray('NONE');
|
|
74
|
+
|
|
75
|
+
const statsPlain = `✔ CONNECTIONS: ${statsInfo.connections} ✔ ACCOUNTS: ${statsInfo.accounts} ✔ BALANCE: ${balStr} AI: ${aiStatus || 'NONE'}`;
|
|
66
76
|
const statsLeftPad = Math.max(0, Math.floor((W - statsPlain.length) / 2));
|
|
67
77
|
const statsRightPad = Math.max(0, W - statsPlain.length - statsLeftPad);
|
|
68
78
|
|
|
@@ -70,7 +80,7 @@ const dashboardMenu = async (service) => {
|
|
|
70
80
|
icon + chalk.white(`CONNECTIONS: ${statsInfo.connections}`) + ' ' +
|
|
71
81
|
icon + chalk.white(`ACCOUNTS: ${statsInfo.accounts}`) + ' ' +
|
|
72
82
|
icon + chalk.white('BALANCE: ') + balColor(balStr) + ' ' +
|
|
73
|
-
|
|
83
|
+
aiIcon + chalk.white('AI: ') + aiText +
|
|
74
84
|
' '.repeat(Math.max(0, statsRightPad)) + chalk.cyan('║'));
|
|
75
85
|
}
|
|
76
86
|
|
|
@@ -87,19 +97,21 @@ const dashboardMenu = async (service) => {
|
|
|
87
97
|
};
|
|
88
98
|
|
|
89
99
|
menuRow(chalk.cyan('[1] VIEW ACCOUNTS'), chalk.cyan('[2] VIEW STATS'));
|
|
90
|
-
menuRow(chalk.cyan('[+] ADD PROP-ACCOUNT'), chalk.magenta('[A] ALGO
|
|
91
|
-
menuRow(chalk.
|
|
100
|
+
menuRow(chalk.cyan('[+] ADD PROP-ACCOUNT'), chalk.magenta('[A] ALGO TRADING'));
|
|
101
|
+
menuRow(chalk.magenta('[I] AI AGENT'), chalk.yellow('[U] UPDATE HQX'));
|
|
102
|
+
menuRow(chalk.red('[X] DISCONNECT'), '');
|
|
92
103
|
|
|
93
104
|
console.log(chalk.cyan('╚' + '═'.repeat(W) + '╝'));
|
|
94
105
|
|
|
95
106
|
// Simple input - no duplicate menu
|
|
96
|
-
const input = await prompts.textInput(chalk.cyan('SELECT (1/2/+/A/U/X)'));
|
|
107
|
+
const input = await prompts.textInput(chalk.cyan('SELECT (1/2/+/A/I/U/X)'));
|
|
97
108
|
|
|
98
109
|
const actionMap = {
|
|
99
110
|
'1': 'accounts',
|
|
100
111
|
'2': 'stats',
|
|
101
112
|
'+': 'add_prop_account',
|
|
102
113
|
'a': 'algotrading',
|
|
114
|
+
'i': 'ai_agent',
|
|
103
115
|
'u': 'update',
|
|
104
116
|
'x': 'disconnect'
|
|
105
117
|
};
|
|
@@ -0,0 +1,371 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* AI Service Manager
|
|
3
|
+
* Manages AI provider connections and settings
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
const { getProviders, getProvider } = require('./providers');
|
|
7
|
+
const { settings } = require('../../config');
|
|
8
|
+
|
|
9
|
+
// In-memory cache of current connection
|
|
10
|
+
let currentConnection = null;
|
|
11
|
+
|
|
12
|
+
/**
|
|
13
|
+
* Get AI settings from storage
|
|
14
|
+
*/
|
|
15
|
+
const getAISettings = () => {
|
|
16
|
+
try {
|
|
17
|
+
return settings.get('ai') || {};
|
|
18
|
+
} catch {
|
|
19
|
+
return {};
|
|
20
|
+
}
|
|
21
|
+
};
|
|
22
|
+
|
|
23
|
+
/**
|
|
24
|
+
* Save AI settings to storage
|
|
25
|
+
*/
|
|
26
|
+
const saveAISettings = (aiSettings) => {
|
|
27
|
+
try {
|
|
28
|
+
settings.set('ai', aiSettings);
|
|
29
|
+
} catch (e) {
|
|
30
|
+
// Silent fail
|
|
31
|
+
}
|
|
32
|
+
};
|
|
33
|
+
|
|
34
|
+
/**
|
|
35
|
+
* Check if AI is connected
|
|
36
|
+
*/
|
|
37
|
+
const isConnected = () => {
|
|
38
|
+
const aiSettings = getAISettings();
|
|
39
|
+
return !!(aiSettings.provider && aiSettings.credentials);
|
|
40
|
+
};
|
|
41
|
+
|
|
42
|
+
/**
|
|
43
|
+
* Get current connection info
|
|
44
|
+
*/
|
|
45
|
+
const getConnection = () => {
|
|
46
|
+
const aiSettings = getAISettings();
|
|
47
|
+
if (!aiSettings.provider) return null;
|
|
48
|
+
|
|
49
|
+
const provider = getProvider(aiSettings.provider);
|
|
50
|
+
if (!provider) return null;
|
|
51
|
+
|
|
52
|
+
return {
|
|
53
|
+
provider: provider,
|
|
54
|
+
option: aiSettings.option,
|
|
55
|
+
model: aiSettings.model || provider.defaultModel,
|
|
56
|
+
connected: true
|
|
57
|
+
};
|
|
58
|
+
};
|
|
59
|
+
|
|
60
|
+
/**
|
|
61
|
+
* Connect to a provider
|
|
62
|
+
*/
|
|
63
|
+
const connect = async (providerId, optionId, credentials, model = null) => {
|
|
64
|
+
const provider = getProvider(providerId);
|
|
65
|
+
if (!provider) {
|
|
66
|
+
throw new Error('Invalid provider');
|
|
67
|
+
}
|
|
68
|
+
|
|
69
|
+
const option = provider.options.find(o => o.id === optionId);
|
|
70
|
+
if (!option) {
|
|
71
|
+
throw new Error('Invalid option');
|
|
72
|
+
}
|
|
73
|
+
|
|
74
|
+
// Save to settings
|
|
75
|
+
const aiSettings = {
|
|
76
|
+
provider: providerId,
|
|
77
|
+
option: optionId,
|
|
78
|
+
credentials: credentials,
|
|
79
|
+
model: model || provider.defaultModel
|
|
80
|
+
};
|
|
81
|
+
|
|
82
|
+
saveAISettings(aiSettings);
|
|
83
|
+
currentConnection = getConnection();
|
|
84
|
+
|
|
85
|
+
return currentConnection;
|
|
86
|
+
};
|
|
87
|
+
|
|
88
|
+
/**
|
|
89
|
+
* Disconnect from AI
|
|
90
|
+
*/
|
|
91
|
+
const disconnect = () => {
|
|
92
|
+
saveAISettings({});
|
|
93
|
+
currentConnection = null;
|
|
94
|
+
};
|
|
95
|
+
|
|
96
|
+
/**
|
|
97
|
+
* Get credentials (for API calls)
|
|
98
|
+
*/
|
|
99
|
+
const getCredentials = () => {
|
|
100
|
+
const aiSettings = getAISettings();
|
|
101
|
+
return aiSettings.credentials || null;
|
|
102
|
+
};
|
|
103
|
+
|
|
104
|
+
/**
|
|
105
|
+
* Validate API key with provider
|
|
106
|
+
*/
|
|
107
|
+
const validateConnection = async (providerId, optionId, credentials) => {
|
|
108
|
+
const provider = getProvider(providerId);
|
|
109
|
+
if (!provider) return { valid: false, error: 'Invalid provider' };
|
|
110
|
+
|
|
111
|
+
try {
|
|
112
|
+
switch (providerId) {
|
|
113
|
+
case 'anthropic':
|
|
114
|
+
return await validateAnthropic(credentials);
|
|
115
|
+
case 'openai':
|
|
116
|
+
return await validateOpenAI(credentials);
|
|
117
|
+
case 'gemini':
|
|
118
|
+
return await validateGemini(credentials);
|
|
119
|
+
case 'deepseek':
|
|
120
|
+
return await validateDeepSeek(credentials);
|
|
121
|
+
case 'groq':
|
|
122
|
+
return await validateGroq(credentials);
|
|
123
|
+
case 'ollama':
|
|
124
|
+
return await validateOllama(credentials);
|
|
125
|
+
case 'lmstudio':
|
|
126
|
+
return await validateLMStudio(credentials);
|
|
127
|
+
case 'custom':
|
|
128
|
+
return await validateCustom(credentials);
|
|
129
|
+
// OpenAI-compatible providers (use same validation)
|
|
130
|
+
case 'openrouter':
|
|
131
|
+
return await validateOpenRouter(credentials);
|
|
132
|
+
case 'xai':
|
|
133
|
+
case 'mistral':
|
|
134
|
+
case 'perplexity':
|
|
135
|
+
case 'together':
|
|
136
|
+
case 'qwen':
|
|
137
|
+
case 'moonshot':
|
|
138
|
+
case 'yi':
|
|
139
|
+
case 'zhipu':
|
|
140
|
+
case 'baichuan':
|
|
141
|
+
return await validateOpenAICompatible(provider, credentials);
|
|
142
|
+
default:
|
|
143
|
+
return { valid: false, error: 'Unknown provider' };
|
|
144
|
+
}
|
|
145
|
+
} catch (error) {
|
|
146
|
+
return { valid: false, error: error.message };
|
|
147
|
+
}
|
|
148
|
+
};
|
|
149
|
+
|
|
150
|
+
// Validation functions for each provider
|
|
151
|
+
const validateAnthropic = async (credentials) => {
|
|
152
|
+
try {
|
|
153
|
+
const response = await fetch('https://api.anthropic.com/v1/messages', {
|
|
154
|
+
method: 'POST',
|
|
155
|
+
headers: {
|
|
156
|
+
'Content-Type': 'application/json',
|
|
157
|
+
'x-api-key': credentials.apiKey || credentials.sessionKey,
|
|
158
|
+
'anthropic-version': '2023-06-01'
|
|
159
|
+
},
|
|
160
|
+
body: JSON.stringify({
|
|
161
|
+
model: 'claude-sonnet-4-5-20250929',
|
|
162
|
+
max_tokens: 10,
|
|
163
|
+
messages: [{ role: 'user', content: 'Hi' }]
|
|
164
|
+
})
|
|
165
|
+
});
|
|
166
|
+
|
|
167
|
+
if (response.ok) {
|
|
168
|
+
return { valid: true };
|
|
169
|
+
}
|
|
170
|
+
|
|
171
|
+
const error = await response.json();
|
|
172
|
+
return { valid: false, error: error.error?.message || 'Invalid API key' };
|
|
173
|
+
} catch (e) {
|
|
174
|
+
return { valid: false, error: e.message };
|
|
175
|
+
}
|
|
176
|
+
};
|
|
177
|
+
|
|
178
|
+
const validateOpenAI = async (credentials) => {
|
|
179
|
+
try {
|
|
180
|
+
const response = await fetch('https://api.openai.com/v1/models', {
|
|
181
|
+
headers: {
|
|
182
|
+
'Authorization': `Bearer ${credentials.apiKey || credentials.accessToken}`
|
|
183
|
+
}
|
|
184
|
+
});
|
|
185
|
+
|
|
186
|
+
if (response.ok) {
|
|
187
|
+
return { valid: true };
|
|
188
|
+
}
|
|
189
|
+
|
|
190
|
+
return { valid: false, error: 'Invalid API key' };
|
|
191
|
+
} catch (e) {
|
|
192
|
+
return { valid: false, error: e.message };
|
|
193
|
+
}
|
|
194
|
+
};
|
|
195
|
+
|
|
196
|
+
const validateGemini = async (credentials) => {
|
|
197
|
+
try {
|
|
198
|
+
const response = await fetch(
|
|
199
|
+
`https://generativelanguage.googleapis.com/v1/models?key=${credentials.apiKey}`
|
|
200
|
+
);
|
|
201
|
+
|
|
202
|
+
if (response.ok) {
|
|
203
|
+
return { valid: true };
|
|
204
|
+
}
|
|
205
|
+
|
|
206
|
+
return { valid: false, error: 'Invalid API key' };
|
|
207
|
+
} catch (e) {
|
|
208
|
+
return { valid: false, error: e.message };
|
|
209
|
+
}
|
|
210
|
+
};
|
|
211
|
+
|
|
212
|
+
const validateDeepSeek = async (credentials) => {
|
|
213
|
+
try {
|
|
214
|
+
const response = await fetch('https://api.deepseek.com/v1/models', {
|
|
215
|
+
headers: {
|
|
216
|
+
'Authorization': `Bearer ${credentials.apiKey}`
|
|
217
|
+
}
|
|
218
|
+
});
|
|
219
|
+
|
|
220
|
+
if (response.ok) {
|
|
221
|
+
return { valid: true };
|
|
222
|
+
}
|
|
223
|
+
|
|
224
|
+
return { valid: false, error: 'Invalid API key' };
|
|
225
|
+
} catch (e) {
|
|
226
|
+
return { valid: false, error: e.message };
|
|
227
|
+
}
|
|
228
|
+
};
|
|
229
|
+
|
|
230
|
+
const validateGroq = async (credentials) => {
|
|
231
|
+
try {
|
|
232
|
+
const response = await fetch('https://api.groq.com/openai/v1/models', {
|
|
233
|
+
headers: {
|
|
234
|
+
'Authorization': `Bearer ${credentials.apiKey}`
|
|
235
|
+
}
|
|
236
|
+
});
|
|
237
|
+
|
|
238
|
+
if (response.ok) {
|
|
239
|
+
return { valid: true };
|
|
240
|
+
}
|
|
241
|
+
|
|
242
|
+
return { valid: false, error: 'Invalid API key' };
|
|
243
|
+
} catch (e) {
|
|
244
|
+
return { valid: false, error: e.message };
|
|
245
|
+
}
|
|
246
|
+
};
|
|
247
|
+
|
|
248
|
+
const validateOllama = async (credentials) => {
|
|
249
|
+
try {
|
|
250
|
+
const endpoint = credentials.endpoint || 'http://localhost:11434';
|
|
251
|
+
const response = await fetch(`${endpoint}/api/tags`);
|
|
252
|
+
|
|
253
|
+
if (response.ok) {
|
|
254
|
+
const data = await response.json();
|
|
255
|
+
return {
|
|
256
|
+
valid: true,
|
|
257
|
+
models: data.models?.map(m => m.name) || []
|
|
258
|
+
};
|
|
259
|
+
}
|
|
260
|
+
|
|
261
|
+
return { valid: false, error: 'Cannot connect to Ollama' };
|
|
262
|
+
} catch (e) {
|
|
263
|
+
return { valid: false, error: 'Ollama not running. Start with: ollama serve' };
|
|
264
|
+
}
|
|
265
|
+
};
|
|
266
|
+
|
|
267
|
+
const validateCustom = async (credentials) => {
|
|
268
|
+
try {
|
|
269
|
+
const response = await fetch(`${credentials.endpoint}/models`, {
|
|
270
|
+
headers: credentials.apiKey ? {
|
|
271
|
+
'Authorization': `Bearer ${credentials.apiKey}`
|
|
272
|
+
} : {}
|
|
273
|
+
});
|
|
274
|
+
|
|
275
|
+
if (response.ok) {
|
|
276
|
+
return { valid: true };
|
|
277
|
+
}
|
|
278
|
+
|
|
279
|
+
return { valid: false, error: 'Cannot connect to endpoint' };
|
|
280
|
+
} catch (e) {
|
|
281
|
+
return { valid: false, error: e.message };
|
|
282
|
+
}
|
|
283
|
+
};
|
|
284
|
+
|
|
285
|
+
const validateOpenRouter = async (credentials) => {
|
|
286
|
+
try {
|
|
287
|
+
const response = await fetch('https://openrouter.ai/api/v1/models', {
|
|
288
|
+
headers: {
|
|
289
|
+
'Authorization': `Bearer ${credentials.apiKey}`
|
|
290
|
+
}
|
|
291
|
+
});
|
|
292
|
+
|
|
293
|
+
if (response.ok) {
|
|
294
|
+
return { valid: true };
|
|
295
|
+
}
|
|
296
|
+
|
|
297
|
+
return { valid: false, error: 'Invalid API key' };
|
|
298
|
+
} catch (e) {
|
|
299
|
+
return { valid: false, error: e.message };
|
|
300
|
+
}
|
|
301
|
+
};
|
|
302
|
+
|
|
303
|
+
const validateLMStudio = async (credentials) => {
|
|
304
|
+
try {
|
|
305
|
+
const endpoint = credentials.endpoint || 'http://localhost:1234/v1';
|
|
306
|
+
const response = await fetch(`${endpoint}/models`);
|
|
307
|
+
|
|
308
|
+
if (response.ok) {
|
|
309
|
+
const data = await response.json();
|
|
310
|
+
return {
|
|
311
|
+
valid: true,
|
|
312
|
+
models: data.data?.map(m => m.id) || []
|
|
313
|
+
};
|
|
314
|
+
}
|
|
315
|
+
|
|
316
|
+
return { valid: false, error: 'Cannot connect to LM Studio' };
|
|
317
|
+
} catch (e) {
|
|
318
|
+
return { valid: false, error: 'LM Studio not running. Start local server first.' };
|
|
319
|
+
}
|
|
320
|
+
};
|
|
321
|
+
|
|
322
|
+
/**
 * Validate credentials against any OpenAI-compatible provider.
 * First tries GET {endpoint}/models; if that is not OK (some providers
 * don't expose it), falls back to a tiny POST /chat/completions probe.
 * @param {{endpoint: string, defaultModel: string}} provider - Provider
 *   definition with the base URL and model used for the probe request.
 * @param {{apiKey: string}} credentials - Holds the provider API key.
 * @returns {Promise<{valid: boolean, error?: string}>}
 */
const validateOpenAICompatible = async (provider, credentials) => {
  try {
    const { endpoint } = provider;
    const authHeaders = {
      'Authorization': `Bearer ${credentials.apiKey}`,
      'Content-Type': 'application/json'
    };

    const modelsRes = await fetch(`${endpoint}/models`, { headers: authHeaders });
    if (modelsRes.ok) {
      return { valid: true };
    }

    // Some providers don't have /models endpoint, try a simple chat
    const chatRes = await fetch(`${endpoint}/chat/completions`, {
      method: 'POST',
      headers: authHeaders,
      body: JSON.stringify({
        model: provider.defaultModel,
        messages: [{ role: 'user', content: 'hi' }],
        max_tokens: 5
      })
    });
    if (chatRes.ok) {
      return { valid: true };
    }

    return { valid: false, error: 'Invalid API key or endpoint' };
  } catch (e) {
    return { valid: false, error: e.message };
  }
};
|
|
359
|
+
|
|
360
|
+
// Public surface of the AI service: provider registry lookups, connection
// lifecycle (connect/disconnect/validate), credential access, and
// persisted AI settings. Consumed by the AI-agent menu.
module.exports = {
  getProviders,
  getProvider,
  isConnected,
  getConnection,
  connect,
  disconnect,
  getCredentials,
  validateConnection,
  getAISettings,
  saveAISettings
};
|
|
@@ -0,0 +1,490 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* AI Providers Configuration
|
|
3
|
+
* Each provider has connection options (API Key, Plans, etc.)
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
/**
 * Registry of supported AI providers, keyed by provider id.
 *
 * Shape of each entry:
 *   id           {string}   Stable identifier; always equals the registry key.
 *   name         {string}   Display name shown in the menu (upper-case).
 *   description  {string}   One-line summary shown next to the name.
 *   category     {string}   'unified' | 'direct' | 'local' | 'custom'.
 *   models       {string[]} Selectable model ids ([] when models are
 *                           discovered at runtime, e.g. LM Studio).
 *   defaultModel {string}   Pre-selected model ('' when none applies).
 *   options      {Array}    Connection methods; each has id, label,
 *                           description (menu bullet lines), fields
 *                           (credential field names to prompt for), and
 *                           optionally url and defaultEndpoint.
 *   endpoint     {string}   Base API URL. Absent for local/custom entries —
 *                           presumably resolved from options[].defaultEndpoint
 *                           or a user-supplied 'endpoint' field; confirm in
 *                           the connection flow.
 */
const PROVIDERS = {
  // ========== UNIFIED PROVIDERS (RECOMMENDED) ==========
  openrouter: {
    id: 'openrouter',
    name: 'OPENROUTER (RECOMMENDED)',
    description: '1 API key for 100+ models',
    category: 'unified',
    models: [
      'anthropic/claude-sonnet-4',
      'anthropic/claude-3-opus',
      'openai/gpt-4o',
      'openai/gpt-4-turbo',
      'google/gemini-pro-1.5',
      'meta-llama/llama-3-70b',
      'mistralai/mistral-large',
      'deepseek/deepseek-chat'
    ],
    defaultModel: 'anthropic/claude-sonnet-4',
    options: [
      {
        id: 'api_key',
        label: 'API KEY',
        description: [
          'Get key at openrouter.ai/keys',
          'Access to Claude, GPT-4, Gemini, Llama & more',
          'Pay-per-use, no subscriptions'
        ],
        fields: ['apiKey'],
        url: 'https://openrouter.ai/keys'
      }
    ],
    endpoint: 'https://openrouter.ai/api/v1'
  },

  // ========== DIRECT PROVIDERS ==========
  anthropic: {
    id: 'anthropic',
    name: 'CLAUDE (ANTHROPIC)',
    description: 'Direct connection to Claude',
    category: 'direct',
    models: ['claude-sonnet-4-5-20250929', 'claude-3-opus-20240229', 'claude-3-haiku-20240307'],
    defaultModel: 'claude-sonnet-4-5-20250929',
    // Two connection methods: pay-per-use key, or a claude.ai session key.
    options: [
      {
        id: 'api_key',
        label: 'API KEY (PAY-PER-USE)',
        description: [
          'Get key at console.anthropic.com',
          '~$0.10 per trading session'
        ],
        fields: ['apiKey'],
        url: 'https://console.anthropic.com'
      },
      {
        id: 'max_plan',
        label: 'MAX PLAN ($100/MONTH)',
        description: [
          'Subscribe at claude.ai',
          'Unlimited usage'
        ],
        fields: ['sessionKey'],
        url: 'https://claude.ai'
      }
    ],
    endpoint: 'https://api.anthropic.com/v1'
  },

  openai: {
    id: 'openai',
    name: 'OPENAI (GPT-4)',
    description: 'Direct connection to GPT-4',
    category: 'direct',
    models: ['gpt-4o', 'gpt-4-turbo', 'gpt-4o-mini', 'gpt-3.5-turbo'],
    defaultModel: 'gpt-4o',
    options: [
      {
        id: 'api_key',
        label: 'API KEY (PAY-PER-USE)',
        description: [
          'Get key at platform.openai.com',
          '~$0.15 per trading session'
        ],
        fields: ['apiKey'],
        url: 'https://platform.openai.com/api-keys'
      },
      {
        id: 'plus_plan',
        label: 'PLUS PLAN ($20/MONTH)',
        description: [
          'Subscribe at chat.openai.com',
          'GPT-4 access included'
        ],
        fields: ['accessToken'],
        url: 'https://chat.openai.com'
      }
    ],
    endpoint: 'https://api.openai.com/v1'
  },

  gemini: {
    id: 'gemini',
    name: 'GEMINI (GOOGLE)',
    description: 'Direct connection to Gemini',
    category: 'direct',
    models: ['gemini-1.5-pro', 'gemini-1.5-flash', 'gemini-pro'],
    defaultModel: 'gemini-1.5-flash',
    options: [
      {
        id: 'api_key',
        label: 'API KEY (FREE TIER)',
        description: [
          'Get key at aistudio.google.com',
          'Free tier: 60 requests/min'
        ],
        fields: ['apiKey'],
        url: 'https://aistudio.google.com/apikey'
      }
    ],
    endpoint: 'https://generativelanguage.googleapis.com/v1'
  },

  deepseek: {
    id: 'deepseek',
    name: 'DEEPSEEK',
    description: 'Very cheap & capable',
    category: 'direct',
    models: ['deepseek-chat', 'deepseek-coder'],
    defaultModel: 'deepseek-chat',
    options: [
      {
        id: 'api_key',
        label: 'API KEY (VERY CHEAP)',
        description: [
          'Get key at platform.deepseek.com',
          '~$0.02 per trading session'
        ],
        fields: ['apiKey'],
        url: 'https://platform.deepseek.com'
      }
    ],
    endpoint: 'https://api.deepseek.com/v1'
  },

  groq: {
    id: 'groq',
    name: 'GROQ',
    description: 'Ultra fast inference',
    category: 'direct',
    models: ['llama-3.3-70b-versatile', 'llama-3.1-8b-instant', 'mixtral-8x7b-32768'],
    defaultModel: 'llama-3.3-70b-versatile',
    options: [
      {
        id: 'api_key',
        label: 'API KEY (FREE TIER)',
        description: [
          'Get key at console.groq.com',
          'Generous free tier',
          'Ultra low latency'
        ],
        fields: ['apiKey'],
        url: 'https://console.groq.com/keys'
      }
    ],
    // Note: Groq exposes its OpenAI-compatible API under /openai/v1.
    endpoint: 'https://api.groq.com/openai/v1'
  },

  xai: {
    id: 'xai',
    name: 'GROK (XAI)',
    description: 'Elon Musk\'s Grok AI',
    category: 'direct',
    models: ['grok-beta', 'grok-2'],
    defaultModel: 'grok-beta',
    options: [
      {
        id: 'api_key',
        label: 'API KEY',
        description: [
          'Get key at console.x.ai',
          'Grok models from xAI'
        ],
        fields: ['apiKey'],
        url: 'https://console.x.ai'
      }
    ],
    endpoint: 'https://api.x.ai/v1'
  },

  mistral: {
    id: 'mistral',
    name: 'MISTRAL',
    description: 'European AI leader',
    category: 'direct',
    models: ['mistral-large-latest', 'mistral-medium', 'mistral-small'],
    defaultModel: 'mistral-large-latest',
    options: [
      {
        id: 'api_key',
        label: 'API KEY',
        description: [
          'Get key at console.mistral.ai',
          'Fast European models'
        ],
        fields: ['apiKey'],
        url: 'https://console.mistral.ai'
      }
    ],
    endpoint: 'https://api.mistral.ai/v1'
  },

  perplexity: {
    id: 'perplexity',
    name: 'PERPLEXITY',
    description: 'Real-time web search AI',
    category: 'direct',
    models: ['llama-3.1-sonar-large-128k-online', 'llama-3.1-sonar-small-128k-online', 'llama-3.1-sonar-huge-128k-online'],
    defaultModel: 'llama-3.1-sonar-large-128k-online',
    options: [
      {
        id: 'api_key',
        label: 'API KEY',
        description: [
          'Get key at perplexity.ai/settings/api',
          'Real-time market news & data',
          'Web search integrated'
        ],
        fields: ['apiKey'],
        url: 'https://www.perplexity.ai/settings/api'
      }
    ],
    // Perplexity's base URL has no version suffix.
    endpoint: 'https://api.perplexity.ai'
  },

  together: {
    id: 'together',
    name: 'TOGETHER AI',
    description: 'Open source models, fast & cheap',
    category: 'direct',
    models: ['meta-llama/Llama-3.3-70B-Instruct-Turbo', 'mistralai/Mixtral-8x22B-Instruct-v0.1', 'Qwen/Qwen2.5-72B-Instruct-Turbo'],
    defaultModel: 'meta-llama/Llama-3.3-70B-Instruct-Turbo',
    options: [
      {
        id: 'api_key',
        label: 'API KEY',
        description: [
          'Get key at api.together.xyz',
          '100+ open source models',
          'Fast inference, good pricing'
        ],
        fields: ['apiKey'],
        url: 'https://api.together.xyz/settings/api-keys'
      }
    ],
    endpoint: 'https://api.together.xyz/v1'
  },


  qwen: {
    id: 'qwen',
    name: 'QWEN (ALIBABA)',
    description: 'Alibaba\'s top AI model',
    category: 'direct',
    models: ['qwen-turbo', 'qwen-plus', 'qwen-max'],
    defaultModel: 'qwen-plus',
    options: [
      {
        id: 'api_key',
        label: 'API KEY (DASHSCOPE)',
        description: [
          'Get key at dashscope.aliyun.com',
          'Qwen2.5 models',
          'Very competitive pricing'
        ],
        fields: ['apiKey'],
        url: 'https://dashscope.console.aliyun.com/apiKey'
      }
    ],
    // DashScope's OpenAI-compatible gateway.
    endpoint: 'https://dashscope.aliyuncs.com/compatible-mode/v1'
  },

  moonshot: {
    id: 'moonshot',
    name: 'MOONSHOT (KIMI)',
    description: '200K context window',
    category: 'direct',
    models: ['moonshot-v1-8k', 'moonshot-v1-32k', 'moonshot-v1-128k'],
    defaultModel: 'moonshot-v1-32k',
    options: [
      {
        id: 'api_key',
        label: 'API KEY',
        description: [
          'Get key at platform.moonshot.cn',
          'Up to 200K context',
          'Good for long documents'
        ],
        fields: ['apiKey'],
        url: 'https://platform.moonshot.cn/console/api-keys'
      }
    ],
    endpoint: 'https://api.moonshot.cn/v1'
  },

  yi: {
    id: 'yi',
    name: '01.AI (YI)',
    description: 'Yi models by Kai-Fu Lee',
    category: 'direct',
    models: ['yi-large', 'yi-medium', 'yi-spark'],
    defaultModel: 'yi-large',
    options: [
      {
        id: 'api_key',
        label: 'API KEY',
        description: [
          'Get key at platform.01.ai',
          'Yi-Large: GPT-4 level',
          'Affordable pricing'
        ],
        fields: ['apiKey'],
        url: 'https://platform.01.ai'
      }
    ],
    endpoint: 'https://api.01.ai/v1'
  },

  zhipu: {
    id: 'zhipu',
    name: 'ZHIPU AI (GLM)',
    description: 'ChatGLM models',
    category: 'direct',
    models: ['glm-4-plus', 'glm-4', 'glm-4-flash'],
    defaultModel: 'glm-4',
    options: [
      {
        id: 'api_key',
        label: 'API KEY',
        description: [
          'Get key at open.bigmodel.cn',
          'ChatGLM-4 models',
          'Strong multilingual'
        ],
        fields: ['apiKey'],
        url: 'https://open.bigmodel.cn/usercenter/apikeys'
      }
    ],
    endpoint: 'https://open.bigmodel.cn/api/paas/v4'
  },

  baichuan: {
    id: 'baichuan',
    name: 'BAICHUAN',
    description: 'Chinese language specialist',
    category: 'direct',
    models: ['Baichuan4', 'Baichuan3-Turbo', 'Baichuan2-Turbo'],
    defaultModel: 'Baichuan4',
    options: [
      {
        id: 'api_key',
        label: 'API KEY',
        description: [
          'Get key at platform.baichuan-ai.com',
          'Best for Chinese content',
          'Competitive pricing'
        ],
        fields: ['apiKey'],
        url: 'https://platform.baichuan-ai.com/console/apikey'
      }
    ],
    endpoint: 'https://api.baichuan-ai.com/v1'
  },

  // ========== LOCAL / FREE ==========
  // Local providers have no top-level endpoint; the user-facing option
  // carries a defaultEndpoint for the locally running server instead.
  ollama: {
    id: 'ollama',
    name: 'OLLAMA (LOCAL - FREE)',
    description: '100% free, runs locally',
    category: 'local',
    models: ['llama3', 'llama3.1', 'mistral', 'codellama', 'phi3', 'gemma2'],
    defaultModel: 'llama3.1',
    options: [
      {
        id: 'local',
        label: 'LOCAL INSTALLATION (FREE)',
        description: [
          'Download at ollama.ai',
          '100% free, no API key needed',
          'Run: ollama pull llama3.1'
        ],
        fields: ['endpoint'],
        url: 'https://ollama.ai',
        defaultEndpoint: 'http://localhost:11434'
      }
    ]
  },

  lmstudio: {
    id: 'lmstudio',
    name: 'LM STUDIO (LOCAL - FREE)',
    description: 'Local with GUI',
    category: 'local',
    // Models are discovered at runtime from the local server's /models.
    models: [],
    defaultModel: '',
    options: [
      {
        id: 'local',
        label: 'LOCAL SERVER (FREE)',
        description: [
          'Download at lmstudio.ai',
          'GUI for local models',
          'OpenAI-compatible API'
        ],
        fields: ['endpoint'],
        url: 'https://lmstudio.ai',
        defaultEndpoint: 'http://localhost:1234/v1'
      }
    ]
  },

  // ========== CUSTOM ==========
  custom: {
    id: 'custom',
    name: 'CUSTOM ENDPOINT',
    description: 'Any OpenAI-compatible API',
    category: 'custom',
    models: [],
    defaultModel: '',
    options: [
      {
        id: 'custom',
        label: 'CUSTOM OPENAI-COMPATIBLE API',
        description: [
          'Self-hosted models',
          'vLLM, TGI, etc.',
          'Any OpenAI-compatible endpoint'
        ],
        // User supplies everything: base URL, key, and model name.
        fields: ['endpoint', 'apiKey', 'model']
      }
    ]
  }
};
|
|
447
|
+
|
|
448
|
+
/**
 * List every configured provider definition.
 * @returns {Array<Object>} All entries of PROVIDERS, in declaration order.
 */
const getProviders = () => Object.keys(PROVIDERS).map((key) => PROVIDERS[key]);
|
|
452
|
+
|
|
453
|
+
/**
 * Collect all providers belonging to a given category.
 * @param {string} category - One of the ids returned by getCategories().
 * @returns {Array<Object>} Matching provider definitions (empty when none).
 */
const getProvidersByCategory = (category) => {
  const matches = [];
  for (const provider of Object.values(PROVIDERS)) {
    if (provider.category === category) {
      matches.push(provider);
    }
  }
  return matches;
};
|
|
459
|
+
|
|
460
|
+
/**
 * Look up a provider definition by its id.
 * Uses an own-property check so inherited Object.prototype keys
 * (e.g. 'toString', 'constructor') can never be returned as providers —
 * the previous `PROVIDERS[id] || null` form matched them.
 * @param {string} id - Provider id, e.g. 'openrouter'.
 * @returns {Object|null} The provider definition, or null when unknown.
 */
const getProvider = (id) => (Object.hasOwn(PROVIDERS, id) ? PROVIDERS[id] : null);
|
|
464
|
+
|
|
465
|
+
/**
 * Return the connection options for a provider.
 * Guards with an own-property check so inherited Object.prototype keys
 * never match (the bare `PROVIDERS[id]` lookup could return a truthy
 * built-in like Function for id 'toString', yielding undefined options).
 * @param {string} id - Provider id.
 * @returns {Array<Object>} The provider's options, or [] when id is unknown.
 */
const getProviderOptions = (id) => {
  const provider = Object.hasOwn(PROVIDERS, id) ? PROVIDERS[id] : null;
  return provider ? provider.options : [];
};
|
|
472
|
+
|
|
473
|
+
/**
 * Provider category descriptors shown in the connection menu.
 * @returns {Array<{id: string, name: string, description: string}>}
 *   A fresh array on every call, in display order.
 */
const getCategories = () => {
  const defs = [
    ['unified', 'UNIFIED (RECOMMENDED)', '1 API key for multiple models'],
    ['direct', 'DIRECT PROVIDERS', 'Connect directly to each provider'],
    ['local', 'LOCAL (FREE)', 'Run models on your machine'],
    ['custom', 'CUSTOM', 'Self-hosted solutions']
  ];
  return defs.map(([id, name, description]) => ({ id, name, description }));
};
|
|
482
|
+
|
|
483
|
+
// Public API of the provider registry: the raw PROVIDERS table plus
// the lookup helpers used by the AI-agent menu and connection service.
module.exports = {
  PROVIDERS,
  getProviders,
  getProvidersByCategory,
  getProvider,
  getProviderOptions,
  getCategories
};
|