hedgequantx 2.5.26 → 2.5.28
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json
CHANGED
package/src/menus/ai-agent.js
CHANGED
@@ -1042,7 +1042,7 @@ const selectModel = async (agent) => {
   drawBoxFooter(boxWidth);

   // Fetch models from real API
-  const { fetchAnthropicModels, fetchAnthropicModelsOAuth, fetchOpenAIModels } = require('../services/ai/client');
+  const { fetchAnthropicModels, fetchAnthropicModelsOAuth, fetchGeminiModels, fetchOpenAIModels } = require('../services/ai/client');

   let models = null;
   const agentCredentials = aiService.getAgentCredentials(agent.id);
@@ -1060,6 +1060,9 @@ const selectModel = async (agent) => {
       // Standard API key
       models = await fetchAnthropicModels(token);
     }
+  } else if (agent.providerId === 'gemini') {
+    // Google Gemini API
+    models = await fetchGeminiModels(agentCredentials?.apiKey);
   } else {
     // OpenAI-compatible providers
     const endpoint = agentCredentials?.endpoint || agent.provider?.endpoint;
@@ -367,6 +367,33 @@ const fetchAnthropicModelsOAuth = async (accessToken) => {
   }
 };

+/**
+ * Fetch available models from Google Gemini API
+ * @param {string} apiKey - API key
+ * @returns {Promise<Array|null>} Array of model IDs or null on error
+ *
+ * Data source: https://generativelanguage.googleapis.com/v1/models (GET)
+ */
+const fetchGeminiModels = async (apiKey) => {
+  if (!apiKey) return null;
+
+  const url = `https://generativelanguage.googleapis.com/v1/models?key=${apiKey}`;
+
+  try {
+    const response = await makeRequest(url, { method: 'GET', timeout: 10000 });
+    if (response.models && Array.isArray(response.models)) {
+      // Filter only generative models and extract the model name
+      return response.models
+        .filter(m => m.supportedGenerationMethods?.includes('generateContent'))
+        .map(m => m.name.replace('models/', ''))
+        .filter(Boolean);
+    }
+    return null;
+  } catch (error) {
+    return null;
+  }
+};
+
 /**
  * Fetch available models from OpenAI-compatible API
  * @param {string} endpoint - API endpoint
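Note (not part of the published diff): a minimal sketch of what the filter/map in the new fetchGeminiModels does, run against a hand-written payload in the shape the Gemini ListModels endpoint returns. The sample model names are illustrative only, not captured from the live API.

    // Sketch only: same filtering logic as fetchGeminiModels, applied to a sample payload.
    const sampleResponse = {
      models: [
        { name: 'models/gemini-1.5-pro', supportedGenerationMethods: ['generateContent', 'countTokens'] },
        { name: 'models/embedding-001', supportedGenerationMethods: ['embedContent'] }
      ]
    };

    const ids = sampleResponse.models
      .filter(m => m.supportedGenerationMethods?.includes('generateContent'))
      .map(m => m.name.replace('models/', ''))
      .filter(Boolean);

    console.log(ids); // ['gemini-1.5-pro'], embedding-only models are dropped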
@@ -407,6 +434,7 @@ module.exports = {
   callGemini,
   fetchAnthropicModels,
   fetchAnthropicModelsOAuth,
+  fetchGeminiModels,
   fetchOpenAIModels,
   getValidOAuthToken
 };
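Note (not part of the published diff): a minimal usage sketch of the newly exported helper. The module path is assumed from the require('../services/ai/client') in the first hunk, and GEMINI_API_KEY is a placeholder environment variable.

    // Sketch only; module path and env var name are assumptions.
    const { fetchGeminiModels } = require('./src/services/ai/client');

    (async () => {
      const models = await fetchGeminiModels(process.env.GEMINI_API_KEY);
      // Either an array of ids such as 'gemini-1.5-pro', or null (missing key, HTTP error, unexpected payload)
      console.log(models);
    })();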
@@ -1,6 +1,9 @@
 /**
  * AI Providers Configuration
  * Each provider has connection options (API Key, Plans, etc.)
+ *
+ * IMPORTANT: models arrays are empty - models MUST be fetched from real APIs
+ * No hardcoded model lists allowed (see RULES.md)
  */

 const PROVIDERS = {
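Note (not part of the published diff): the provider entries below pair an empty models array with defaultModel: null and comment that the first model returned by the API is used. A minimal sketch of that fallback; pickDefaultModel is a hypothetical helper, not code from this package.

    // Hypothetical helper illustrating the "first model from API" fallback described in the comments.
    const pickDefaultModel = (provider, fetchedModels) => {
      if (provider.defaultModel) return provider.defaultModel;
      return Array.isArray(fetchedModels) && fetchedModels.length > 0
        ? fetchedModels[0]  // use the first model returned by the provider's API
        : null;             // nothing fetched yet; caller must handle the empty case
    };

    // Example: pickDefaultModel({ models: [], defaultModel: null }, ['gemini-1.5-pro']) === 'gemini-1.5-pro'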
@@ -66,8 +69,8 @@ const PROVIDERS = {
     name: 'OPENAI (GPT-4)',
     description: 'Direct connection to GPT-4',
     category: 'direct',
-    models: [
-    defaultModel:
+    models: [], // Fetched from API at runtime
+    defaultModel: null, // Will use first model from API
     options: [
       {
         id: 'api_key',
@@ -88,8 +91,8 @@ const PROVIDERS = {
     name: 'GEMINI (GOOGLE)',
     description: 'Direct connection to Gemini',
     category: 'direct',
-    models: [
-    defaultModel:
+    models: [], // Fetched from API at runtime
+    defaultModel: null, // Will use first model from API
     options: [
       {
         id: 'api_key',
@@ -110,8 +113,8 @@ const PROVIDERS = {
     name: 'DEEPSEEK',
     description: 'Very cheap & capable',
     category: 'direct',
-    models: [
-    defaultModel:
+    models: [], // Fetched from API at runtime
+    defaultModel: null, // Will use first model from API
     options: [
       {
         id: 'api_key',
@@ -132,8 +135,8 @@ const PROVIDERS = {
     name: 'GROQ',
     description: 'Ultra fast inference',
     category: 'direct',
-    models: [
-    defaultModel:
+    models: [], // Fetched from API at runtime
+    defaultModel: null, // Will use first model from API
     options: [
       {
         id: 'api_key',
@@ -155,8 +158,8 @@ const PROVIDERS = {
     name: 'GROK (XAI)',
     description: 'Elon Musk\'s Grok AI',
     category: 'direct',
-    models: [
-    defaultModel:
+    models: [], // Fetched from API at runtime
+    defaultModel: null, // Will use first model from API
     options: [
       {
         id: 'api_key',
@@ -177,8 +180,8 @@ const PROVIDERS = {
     name: 'MISTRAL',
     description: 'European AI leader',
     category: 'direct',
-    models: [
-    defaultModel:
+    models: [], // Fetched from API at runtime
+    defaultModel: null, // Will use first model from API
     options: [
       {
         id: 'api_key',
@@ -199,8 +202,8 @@ const PROVIDERS = {
     name: 'PERPLEXITY',
     description: 'Real-time web search AI',
     category: 'direct',
-    models: [
-    defaultModel:
+    models: [], // Fetched from API at runtime
+    defaultModel: null, // Will use first model from API
     options: [
       {
         id: 'api_key',
@@ -222,8 +225,8 @@ const PROVIDERS = {
     name: 'TOGETHER AI',
     description: 'Open source models, fast & cheap',
     category: 'direct',
-    models: [
-    defaultModel:
+    models: [], // Fetched from API at runtime
+    defaultModel: null, // Will use first model from API
     options: [
       {
         id: 'api_key',
@@ -246,8 +249,8 @@ const PROVIDERS = {
     name: 'QWEN (ALIBABA)',
     description: 'Alibaba\'s top AI model',
     category: 'direct',
-    models: [
-    defaultModel:
+    models: [], // Fetched from API at runtime
+    defaultModel: null, // Will use first model from API
     options: [
       {
         id: 'api_key',
@@ -269,8 +272,8 @@ const PROVIDERS = {
     name: 'MOONSHOT (KIMI)',
     description: '200K context window',
     category: 'direct',
-    models: [
-    defaultModel:
+    models: [], // Fetched from API at runtime
+    defaultModel: null, // Will use first model from API
     options: [
       {
         id: 'api_key',
@@ -292,8 +295,8 @@ const PROVIDERS = {
     name: '01.AI (YI)',
     description: 'Yi models by Kai-Fu Lee',
     category: 'direct',
-    models: [
-    defaultModel:
+    models: [], // Fetched from API at runtime
+    defaultModel: null, // Will use first model from API
     options: [
       {
         id: 'api_key',
@@ -315,8 +318,8 @@ const PROVIDERS = {
     name: 'ZHIPU AI (GLM)',
     description: 'ChatGLM models',
     category: 'direct',
-    models: [
-    defaultModel:
+    models: [], // Fetched from API at runtime
+    defaultModel: null, // Will use first model from API
     options: [
       {
         id: 'api_key',
@@ -338,15 +341,15 @@ const PROVIDERS = {
     name: 'BAICHUAN',
     description: 'Multilingual AI model',
     category: 'direct',
-    models: [
-    defaultModel:
+    models: [], // Fetched from API at runtime
+    defaultModel: null, // Will use first model from API
     options: [
       {
         id: 'api_key',
         label: 'API KEY',
         description: [
           'Get key at platform.baichuan-ai.com',
-          '
+          'Strong multilingual support',
           'Competitive pricing'
         ],
         fields: ['apiKey'],
@@ -362,8 +365,8 @@ const PROVIDERS = {
     name: 'OLLAMA (LOCAL - FREE)',
     description: '100% free, runs locally',
     category: 'local',
-    models: [
-    defaultModel:
+    models: [], // Fetched from local API at runtime
+    defaultModel: null, // Will use first model from API
     options: [
       {
         id: 'local',
@@ -385,8 +388,8 @@ const PROVIDERS = {
     name: 'LM STUDIO (LOCAL - FREE)',
     description: 'Local with GUI',
     category: 'local',
-    models: [],
-    defaultModel:
+    models: [], // Fetched from local API at runtime
+    defaultModel: null, // Will use first model from API
     options: [
       {
         id: 'local',
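Note (not part of the published diff): the Ollama and LM Studio entries above say models are "Fetched from local API at runtime", but the diff does not show that code path. A minimal sketch under the assumption that the local server exposes an OpenAI-compatible GET /v1/models; the address http://localhost:11434 is Ollama's default and the model id in the comment is illustrative only.

    // Sketch only; endpoint, port, and response handling are assumptions, not code from this package.
    const http = require('http');

    const listLocalModels = (base = 'http://localhost:11434') => new Promise((resolve) => {
      http.get(`${base}/v1/models`, (res) => {
        let body = '';
        res.on('data', (chunk) => { body += chunk; });
        res.on('end', () => {
          try {
            const parsed = JSON.parse(body);
            // OpenAI-style list: { object: 'list', data: [{ id: 'llama3.1:8b', ... }] }
            resolve(Array.isArray(parsed.data) ? parsed.data.map((m) => m.id) : null);
          } catch (err) {
            resolve(null);
          }
        });
      }).on('error', () => resolve(null));
    });

    // listLocalModels().then(console.log); // e.g. ['llama3.1:8b'] or null if the server is not running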
@@ -409,8 +412,8 @@ const PROVIDERS = {
     name: 'CUSTOM ENDPOINT',
     description: 'Any OpenAI-compatible API',
     category: 'custom',
-    models: [],
-    defaultModel:
+    models: [], // User must specify model
+    defaultModel: null,
     options: [
       {
         id: 'custom',