hedgequantx 2.5.27 → 2.5.29
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/src/menus/ai-agent.js +27 -5
- package/src/services/ai/providers/index.js +34 -31
package/package.json
CHANGED
package/src/menus/ai-agent.js
CHANGED
@@ -181,17 +181,39 @@ const showAgentDetails = async (agent) => {

   console.log(makeLine(chalk.white('NAME: ') + providerColor(agent.name)));
   console.log(makeLine(chalk.white('PROVIDER: ') + chalk.white(agent.provider?.name || agent.providerId)));
-  console.log(makeLine(chalk.white('MODEL: ') + chalk.white(agent.model)));
+  console.log(makeLine(chalk.white('MODEL: ') + chalk.white(agent.model || 'N/A')));
   console.log(makeLine(chalk.white('STATUS: ') + (agent.isActive ? chalk.green('ACTIVE') : chalk.white('STANDBY'))));

   console.log(chalk.cyan('╠' + '═'.repeat(W) + '╣'));

+  // Menu in 2 columns
+  const colWidth = Math.floor(W / 2);
+
+  const menuRow = (col1, col2 = '') => {
+    const c1Plain = col1.replace(/\x1b\[[0-9;]*m/g, '');
+    const c2Plain = col2.replace(/\x1b\[[0-9;]*m/g, '');
+
+    const pad1Left = Math.floor((colWidth - c1Plain.length) / 2);
+    const pad1Right = colWidth - c1Plain.length - pad1Left;
+
+    const col2Width = W - colWidth;
+    const pad2Left = Math.floor((col2Width - c2Plain.length) / 2);
+    const pad2Right = col2Width - c2Plain.length - pad2Left;
+
+    const line =
+      ' '.repeat(pad1Left) + col1 + ' '.repeat(pad1Right) +
+      ' '.repeat(pad2Left) + col2 + ' '.repeat(pad2Right);
+
+    console.log(chalk.cyan('║') + line + chalk.cyan('║'));
+  };
+
   if (!agent.isActive) {
-
+    menuRow(chalk.cyan('[A] SET AS ACTIVE'), chalk.yellow('[M] CHANGE MODEL'));
+    menuRow(chalk.red('[R] REMOVE'), chalk.white('[<] BACK'));
+  } else {
+    menuRow(chalk.yellow('[M] CHANGE MODEL'), chalk.red('[R] REMOVE'));
+    menuRow(chalk.white('[<] BACK'), '');
   }
-  console.log(makeLine(chalk.yellow('[M] CHANGE MODEL')));
-  console.log(makeLine(chalk.red('[R] REMOVE')));
-  console.log(makeLine(chalk.white('[<] BACK')));

   drawBoxFooter(boxWidth);

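Note on the ai-agent.js hunk above: the new menuRow helper strips ANSI colour codes before measuring each label, so the centring math pads by visible characters rather than escape sequences, then splits the box into two equal columns. A minimal standalone sketch of the same idea, with an assumed box width W and illustrative colour helpers and labels (not code from the package):

// Standalone sketch of the two-column centring shown in the diff above.
// W and the colour helpers are assumptions; the package itself uses chalk
// and the box width from its menu module.
const cyan = (s) => `\x1b[36m${s}\x1b[0m`;
const yellow = (s) => `\x1b[33m${s}\x1b[0m`;

const W = 60;                                               // assumed inner box width
const colWidth = Math.floor(W / 2);
const stripAnsi = (s) => s.replace(/\x1b\[[0-9;]*m/g, '');  // measure visible length only

const menuRow = (col1, col2 = '') => {
  const c1 = stripAnsi(col1);
  const c2 = stripAnsi(col2);

  // centre col1 in the left half of the box
  const pad1Left = Math.floor((colWidth - c1.length) / 2);
  const pad1Right = colWidth - c1.length - pad1Left;

  // centre col2 in whatever width remains
  const col2Width = W - colWidth;
  const pad2Left = Math.floor((col2Width - c2.length) / 2);
  const pad2Right = col2Width - c2.length - pad2Left;

  console.log(
    cyan('║') +
    ' '.repeat(pad1Left) + col1 + ' '.repeat(pad1Right) +
    ' '.repeat(pad2Left) + col2 + ' '.repeat(pad2Right) +
    cyan('║')
  );
};

menuRow(cyan('[A] SET AS ACTIVE'), yellow('[M] CHANGE MODEL'));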
package/src/services/ai/providers/index.js
CHANGED

@@ -1,6 +1,9 @@
 /**
  * AI Providers Configuration
  * Each provider has connection options (API Key, Plans, etc.)
+ *
+ * IMPORTANT: models arrays are empty - models MUST be fetched from real APIs
+ * No hardcoded model lists allowed (see RULES.md)
  */

 const PROVIDERS = {

@@ -66,8 +69,8 @@ const PROVIDERS = {
     name: 'OPENAI (GPT-4)',
     description: 'Direct connection to GPT-4',
     category: 'direct',
-    models: [
-    defaultModel:
+    models: [], // Fetched from API at runtime
+    defaultModel: null, // Will use first model from API
     options: [
       {
         id: 'api_key',

@@ -110,8 +113,8 @@ const PROVIDERS = {
     name: 'DEEPSEEK',
     description: 'Very cheap & capable',
     category: 'direct',
-    models: [
-    defaultModel:
+    models: [], // Fetched from API at runtime
+    defaultModel: null, // Will use first model from API
     options: [
       {
         id: 'api_key',

@@ -132,8 +135,8 @@ const PROVIDERS = {
     name: 'GROQ',
     description: 'Ultra fast inference',
     category: 'direct',
-    models: [
-    defaultModel:
+    models: [], // Fetched from API at runtime
+    defaultModel: null, // Will use first model from API
     options: [
       {
         id: 'api_key',

@@ -155,8 +158,8 @@ const PROVIDERS = {
     name: 'GROK (XAI)',
     description: 'Elon Musk\'s Grok AI',
     category: 'direct',
-    models: [
-    defaultModel:
+    models: [], // Fetched from API at runtime
+    defaultModel: null, // Will use first model from API
     options: [
       {
         id: 'api_key',

@@ -177,8 +180,8 @@ const PROVIDERS = {
     name: 'MISTRAL',
     description: 'European AI leader',
     category: 'direct',
-    models: [
-    defaultModel:
+    models: [], // Fetched from API at runtime
+    defaultModel: null, // Will use first model from API
     options: [
       {
         id: 'api_key',

@@ -199,8 +202,8 @@ const PROVIDERS = {
     name: 'PERPLEXITY',
     description: 'Real-time web search AI',
     category: 'direct',
-    models: [
-    defaultModel:
+    models: [], // Fetched from API at runtime
+    defaultModel: null, // Will use first model from API
     options: [
       {
         id: 'api_key',

@@ -222,8 +225,8 @@ const PROVIDERS = {
     name: 'TOGETHER AI',
     description: 'Open source models, fast & cheap',
     category: 'direct',
-    models: [
-    defaultModel:
+    models: [], // Fetched from API at runtime
+    defaultModel: null, // Will use first model from API
     options: [
       {
         id: 'api_key',

@@ -246,8 +249,8 @@ const PROVIDERS = {
     name: 'QWEN (ALIBABA)',
     description: 'Alibaba\'s top AI model',
     category: 'direct',
-    models: [
-    defaultModel:
+    models: [], // Fetched from API at runtime
+    defaultModel: null, // Will use first model from API
     options: [
       {
         id: 'api_key',

@@ -269,8 +272,8 @@ const PROVIDERS = {
     name: 'MOONSHOT (KIMI)',
     description: '200K context window',
     category: 'direct',
-    models: [
-    defaultModel:
+    models: [], // Fetched from API at runtime
+    defaultModel: null, // Will use first model from API
     options: [
       {
         id: 'api_key',

@@ -292,8 +295,8 @@ const PROVIDERS = {
     name: '01.AI (YI)',
     description: 'Yi models by Kai-Fu Lee',
     category: 'direct',
-    models: [
-    defaultModel:
+    models: [], // Fetched from API at runtime
+    defaultModel: null, // Will use first model from API
     options: [
       {
         id: 'api_key',

@@ -315,8 +318,8 @@ const PROVIDERS = {
     name: 'ZHIPU AI (GLM)',
     description: 'ChatGLM models',
     category: 'direct',
-    models: [
-    defaultModel:
+    models: [], // Fetched from API at runtime
+    defaultModel: null, // Will use first model from API
     options: [
       {
         id: 'api_key',

@@ -338,15 +341,15 @@ const PROVIDERS = {
     name: 'BAICHUAN',
     description: 'Multilingual AI model',
     category: 'direct',
-    models: [
-    defaultModel:
+    models: [], // Fetched from API at runtime
+    defaultModel: null, // Will use first model from API
     options: [
       {
         id: 'api_key',
         label: 'API KEY',
         description: [
           'Get key at platform.baichuan-ai.com',
-          '
+          'Strong multilingual support',
           'Competitive pricing'
         ],
         fields: ['apiKey'],

@@ -362,8 +365,8 @@ const PROVIDERS = {
     name: 'OLLAMA (LOCAL - FREE)',
     description: '100% free, runs locally',
     category: 'local',
-    models: [
-    defaultModel:
+    models: [], // Fetched from local API at runtime
+    defaultModel: null, // Will use first model from API
     options: [
       {
         id: 'local',

@@ -385,8 +388,8 @@ const PROVIDERS = {
     name: 'LM STUDIO (LOCAL - FREE)',
     description: 'Local with GUI',
     category: 'local',
-    models: [],
-    defaultModel:
+    models: [], // Fetched from local API at runtime
+    defaultModel: null, // Will use first model from API
     options: [
       {
         id: 'local',

@@ -409,8 +412,8 @@ const PROVIDERS = {
     name: 'CUSTOM ENDPOINT',
     description: 'Any OpenAI-compatible API',
     category: 'custom',
-    models: [],
-    defaultModel:
+    models: [], // User must specify model
+    defaultModel: null,
    options: [
       {
         id: 'custom',
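Note on the providers/index.js hunks above: every hardcoded models array is emptied and defaultModel is set to null, with the lists now fetched from the provider APIs at runtime. The fetch logic itself is not part of this diff; purely as an illustration of what such a runtime lookup could look like against an OpenAI-compatible /models endpoint, with the function name, base URL and response handling all being assumptions rather than hedgequantx code:

// Illustrative only: fetching a model list at runtime from an OpenAI-compatible API.
// The actual fetch logic used by the package is not shown in this diff.
const fetchModels = async (baseUrl, apiKey) => {
  const res = await fetch(`${baseUrl}/models`, {            // e.g. GET https://api.openai.com/v1/models
    headers: { Authorization: `Bearer ${apiKey}` },
  });
  if (!res.ok) throw new Error(`Model list request failed: ${res.status}`);
  const body = await res.json();
  return (body.data || []).map((m) => m.id);                // OpenAI-style list response: { data: [{ id }, ...] }
};

// Usage sketch: the first returned model could stand in for the now-null defaultModel.
// fetchModels('https://api.openai.com/v1', process.env.OPENAI_API_KEY)
//   .then((models) => console.log(models[0]));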