hedgequantx 2.8.2 → 2.8.4
This diff shows the changes between publicly released versions of the package as they appear in its public registry, and is provided for informational purposes only.
- package/package.json +1 -1
- package/src/pages/ai-agents-ui.js +6 -4
- package/src/pages/ai-models.js +166 -28
package/package.json
CHANGED

package/src/pages/ai-agents-ui.js
CHANGED
@@ -316,18 +316,20 @@ const drawConnectionTest = async (agents, boxWidth, clearWithBanner) => {
     return { success: false, error: 'No agents' };
   }
 
-  clearWithBanner();
   const W = boxWidth - 2;
 
+  // Show loading state with complete box
+  clearWithBanner();
   console.log(chalk.cyan('╔' + '═'.repeat(W) + '╗'));
   console.log(chalk.cyan('║') + chalk.yellow.bold(centerText('AI AGENTS CONNECTION TEST', W)) + chalk.cyan('║'));
   console.log(chalk.cyan('╠' + '═'.repeat(W) + '╣'));
+  const loadingText = ' Testing connections... Please wait';
+  console.log(chalk.cyan('║') + chalk.yellow(loadingText) + ' '.repeat(W - loadingText.length) + chalk.cyan('║'));
   console.log(chalk.cyan('║') + ' '.repeat(W) + chalk.cyan('║'));
+  console.log(chalk.cyan('╚' + '═'.repeat(W) + '╝'));
 
-  // Run pre-flight check
-  const spinner = ora({ text: 'Testing connections...', color: 'yellow' }).start();
+  // Run pre-flight check (no spinner, box stays complete)
   const results = await runPreflightCheck(agents);
-  spinner.stop();
 
   // Clear and redraw with results
   clearWithBanner();
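Note: this hunk swaps the ora spinner for a fully drawn box, so the frame is already closed while runPreflightCheck runs instead of being overwritten by spinner output. A minimal standalone sketch of the same rendering idea (assuming chalk and a centerText helper like the one used above; the Math.max guard is an extra precaution not present in the diff, since ' '.repeat throws on negative counts):

const chalk = require('chalk');

// Draw a closed box with a static "loading" row instead of starting a spinner.
// centerText(text, width) is assumed to pad/center text to the box width.
const drawLoadingBox = (W, centerText) => {
  console.log(chalk.cyan('╔' + '═'.repeat(W) + '╗'));
  console.log(chalk.cyan('║') + chalk.yellow.bold(centerText('AI AGENTS CONNECTION TEST', W)) + chalk.cyan('║'));
  console.log(chalk.cyan('╠' + '═'.repeat(W) + '╣'));
  const loadingText = ' Testing connections... Please wait';
  console.log(chalk.cyan('║') + chalk.yellow(loadingText) + ' '.repeat(Math.max(0, W - loadingText.length)) + chalk.cyan('║'));
  console.log(chalk.cyan('║') + ' '.repeat(W) + chalk.cyan('║'));
  console.log(chalk.cyan('╚' + '═'.repeat(W) + '╝'));
};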
package/src/pages/ai-models.js
CHANGED
@@ -9,11 +9,12 @@ const https = require('https');
 
 /**
  * API endpoints for fetching models
+ * Using beta endpoints where available for latest models
 */
 const API_ENDPOINTS = {
   anthropic: 'https://api.anthropic.com/v1/models',
   openai: 'https://api.openai.com/v1/models',
-  google: 'https://generativelanguage.googleapis.com/
+  google: 'https://generativelanguage.googleapis.com/v1beta/models', // v1beta for Gemini 3
   mistral: 'https://api.mistral.ai/v1/models',
   groq: 'https://api.groq.com/openai/v1/models',
   xai: 'https://api.x.ai/v1/models',
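Note: the Google endpoint now points at v1beta so newer Gemini models show up in the listing. A hedged sketch of how that endpoint could be queried with Node's https module (already required by ai-models.js); the x-goog-api-key header is an assumption here, since getAuthHeaders is outside this hunk:

const https = require('https');

// Fetch the Gemini model list from the v1beta endpoint and resolve the models array.
const listGeminiModels = (apiKey) => new Promise((resolve, reject) => {
  const req = https.get(
    'https://generativelanguage.googleapis.com/v1beta/models',
    { headers: { 'x-goog-api-key': apiKey } },
    (res) => {
      let body = '';
      res.on('data', (chunk) => { body += chunk; });
      res.on('end', () => {
        try {
          resolve(JSON.parse(body).models || []);
        } catch (err) {
          reject(err);
        }
      });
    }
  );
  req.on('error', reject);
});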
@@ -99,65 +100,202 @@ const getAuthHeaders = (providerId, apiKey) => {
 };
 
 /**
- *
+ * Excluded patterns - models NOT suitable for algo trading
+ * These are image, audio, embedding, moderation models
+ */
+const EXCLUDED_PATTERNS = [
+  'whisper', 'tts', 'dall-e', 'embedding', 'embed', 'moderation',
+  'image', 'vision', 'audio', 'speech', 'realtime', 'transcription',
+  'aqa', 'gecko', 'bison', 'learnlm'
+];
+
+/**
+ * Check if model should be excluded (not for algo trading)
+ * @param {string} modelId - Model ID
+ * @returns {boolean} True if should be excluded
+ */
+const shouldExcludeModel = (modelId) => {
+  const id = modelId.toLowerCase();
+  return EXCLUDED_PATTERNS.some(pattern => id.includes(pattern));
+};
+
+/**
+ * Extract version number from model ID for sorting
+ * @param {string} modelId - Model ID
+ * @returns {number} Version number (higher = newer)
+ */
+const extractVersion = (modelId) => {
+  const id = modelId.toLowerCase();
+
+  // Gemini: gemini-3 > gemini-2.5 > gemini-2.0
+  const geminiMatch = id.match(/gemini-(\d+\.?\d*)/);
+  if (geminiMatch) return parseFloat(geminiMatch[1]) * 100;
+
+  // Claude: opus-4.5 > opus-4 > sonnet-4 > haiku
+  if (id.includes('opus-4.5') || id.includes('opus-4-5')) return 450;
+  if (id.includes('opus-4.1') || id.includes('opus-4-1')) return 410;
+  if (id.includes('opus-4')) return 400;
+  if (id.includes('sonnet-4.5') || id.includes('sonnet-4-5')) return 350;
+  if (id.includes('sonnet-4')) return 340;
+  if (id.includes('haiku-4.5') || id.includes('haiku-4-5')) return 250;
+  if (id.includes('sonnet-3.7') || id.includes('3-7-sonnet')) return 237;
+  if (id.includes('sonnet-3.5') || id.includes('3-5-sonnet')) return 235;
+  if (id.includes('haiku-3.5') || id.includes('3-5-haiku')) return 135;
+  if (id.includes('opus')) return 300;
+  if (id.includes('sonnet')) return 200;
+  if (id.includes('haiku')) return 100;
+
+  // GPT: gpt-4o > gpt-4-turbo > gpt-4 > gpt-3.5
+  if (id.includes('gpt-4o')) return 450;
+  if (id.includes('gpt-4-turbo')) return 420;
+  if (id.includes('gpt-4')) return 400;
+  if (id.includes('gpt-3.5')) return 350;
+  if (id.includes('o1')) return 500; // o1 reasoning models
+  if (id.includes('o3')) return 530; // o3 reasoning models
+
+  // Mistral: large > medium > small
+  if (id.includes('large')) return 300;
+  if (id.includes('medium')) return 200;
+  if (id.includes('small') || id.includes('tiny')) return 100;
+
+  // Default
+  return 50;
+};
+
+/**
+ * Get model tier for display (Pro/Flash/Lite)
+ * @param {string} modelId - Model ID
+ * @returns {number} Tier weight (higher = more powerful)
+ */
+const getModelTier = (modelId) => {
+  const id = modelId.toLowerCase();
+  if (id.includes('pro') || id.includes('opus') || id.includes('large')) return 30;
+  if (id.includes('flash') || id.includes('sonnet') || id.includes('medium')) return 20;
+  if (id.includes('lite') || id.includes('haiku') || id.includes('small')) return 10;
+  return 15;
+};
+
+/**
+ * Parse models response based on provider - filtered for algo trading
  * @param {string} providerId - Provider ID
  * @param {Object} data - API response data
- * @returns {Array} Parsed models list
+ * @returns {Array} Parsed and filtered models list
 */
 const parseModelsResponse = (providerId, data) => {
   if (!data) return [];
 
   try {
+    let models = [];
+
     switch (providerId) {
       case 'anthropic':
         // Anthropic returns { data: [{ id, display_name, ... }] }
-
-            id
-
-
+        models = (data.data || [])
+          .filter(m => m.id && !shouldExcludeModel(m.id))
+          .map(m => ({
+            id: m.id,
+            name: m.display_name || m.id
+          }));
+        break;
 
       case 'openai':
-      case 'groq':
-      case 'xai':
         // OpenAI format: { data: [{ id, ... }] }
-
-          .filter(m => m.id && !
+        models = (data.data || [])
+          .filter(m => m.id && !shouldExcludeModel(m.id))
+          .filter(m => m.id.startsWith('gpt-') || m.id.startsWith('o1') || m.id.startsWith('o3'))
           .map(m => ({
             id: m.id,
             name: m.id
           }));
+        break;
 
       case 'google':
-        // Google format: { models: [{ name, displayName,
-
-
-
-
+        // Google format: { models: [{ name, displayName, supportedGenerationMethods }] }
+        models = (data.models || [])
+          .filter(m => {
+            const id = m.name?.replace('models/', '') || '';
+            // Only Gemini chat models
+            return id.startsWith('gemini-') &&
+                   !shouldExcludeModel(id) &&
+                   m.supportedGenerationMethods?.includes('generateContent');
+          })
+          .map(m => ({
+            id: m.name?.replace('models/', '') || m.name,
+            name: m.displayName || m.name
+          }));
+        break;
+
+      case 'groq':
+        // Groq format: { data: [{ id, ... }] }
+        models = (data.data || [])
+          .filter(m => m.id && !shouldExcludeModel(m.id))
+          .map(m => ({
+            id: m.id,
+            name: m.id
+          }));
+        break;
+
+      case 'xai':
+        // xAI format: { data: [{ id, ... }] }
+        models = (data.data || [])
+          .filter(m => m.id && !shouldExcludeModel(m.id))
+          .filter(m => m.id.includes('grok'))
+          .map(m => ({
+            id: m.id,
+            name: m.id
+          }));
+        break;
 
       case 'mistral':
         // Mistral format: { data: [{ id, ... }] }
-
-            id
-
-
+        models = (data.data || [])
+          .filter(m => m.id && !shouldExcludeModel(m.id))
+          .map(m => ({
+            id: m.id,
+            name: m.id
+          }));
+        break;
 
       case 'perplexity':
         // Perplexity format varies
-
-            id
-
-
+        models = (data.models || data.data || [])
+          .filter(m => (m.id || m.model) && !shouldExcludeModel(m.id || m.model))
+          .map(m => ({
+            id: m.id || m.model,
+            name: m.id || m.model
+          }));
+        break;
 
       case 'openrouter':
         // OpenRouter format: { data: [{ id, name, ... }] }
-
-
-
-
+        // Filter to show only main providers' chat models
+        models = (data.data || [])
+          .filter(m => {
+            if (!m.id || shouldExcludeModel(m.id)) return false;
+            // Only keep major providers for trading
+            const validPrefixes = [
+              'anthropic/claude', 'openai/gpt', 'openai/o1', 'openai/o3',
+              'google/gemini', 'mistralai/', 'meta-llama/', 'x-ai/grok'
+            ];
+            return validPrefixes.some(p => m.id.startsWith(p));
+          })
+          .map(m => ({
+            id: m.id,
+            name: m.name || m.id
+          }));
+        break;
 
       default:
         return [];
     }
+
+    // Sort by version (newest first), then by tier (most powerful first)
+    return models.sort((a, b) => {
+      const versionDiff = extractVersion(b.id) - extractVersion(a.id);
+      if (versionDiff !== 0) return versionDiff;
+      return getModelTier(b.id) - getModelTier(a.id);
+    });
+
   } catch (error) {
     return [];
   }
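Note: the new sort orders models by extractVersion (newest first) and breaks ties with getModelTier (pro/opus/large above flash/sonnet/medium above lite/haiku/small). An illustrative check of that ordering, assuming extractVersion and getModelTier from this hunk are in scope; the sample IDs are examples only:

// Sample IDs are illustrative; the comparator is the one added in this hunk.
const sample = [
  { id: 'gemini-2.0-flash', name: 'Gemini 2.0 Flash' },
  { id: 'gemini-3-pro-preview', name: 'Gemini 3 Pro Preview' },
  { id: 'gemini-2.5-pro', name: 'Gemini 2.5 Pro' }
];

const sorted = sample.sort((a, b) => {
  const versionDiff = extractVersion(b.id) - extractVersion(a.id);
  if (versionDiff !== 0) return versionDiff;
  return getModelTier(b.id) - getModelTier(a.id);
});

// extractVersion scores: gemini-3-* = 300, gemini-2.5-* = 250, gemini-2.0-* = 200,
// so sorted lists gemini-3-pro-preview, then gemini-2.5-pro, then gemini-2.0-flash.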