hedgequantx 2.6.100 → 2.6.101
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/src/menus/ai-agent.js +55 -42
- package/src/services/ai/client.js +35 -19
package/package.json
CHANGED
package/src/menus/ai-agent.js
CHANGED
@@ -892,35 +892,42 @@ const setupRemoteOAuth = async (provider, config) => {
   };
 
   // Try to fetch models with the new token
-  spinner.text = 'Fetching available models...';
+  spinner.text = 'Fetching available models from API...';
   spinner.start();
 
   let models = [];
+  let fetchError = null;
   try {
     const { fetchModelsWithOAuth } = require('../services/ai/client');
     models = await fetchModelsWithOAuth(provider.id, result.access);
   } catch (e) {
-
+    fetchError = e.message;
   }
 
-
-
+  // RULE: Models MUST come from API - no hardcoded fallback
   if (!models || models.length === 0) {
-    spinner.
-
-
-
-
-
-
-
-
-
-    // Let user select model from list
-    selectedModel = await selectModelFromList(models, config.name);
-    if (!selectedModel) {
-      return await selectProviderOption(provider);
+    spinner.fail('Could not fetch models from API');
+    console.log();
+    console.log(chalk.red(' ERROR: Unable to retrieve models from provider API'));
+    console.log(chalk.white(' Possible causes:'));
+    console.log(chalk.gray(' - OAuth token may not have permission to list models'));
+    console.log(chalk.gray(' - Network issue or API temporarily unavailable'));
+    console.log(chalk.gray(' - Provider API may have changed'));
+    if (fetchError) {
+      console.log(chalk.gray(` - Error: ${fetchError}`));
     }
+    console.log();
+    console.log(chalk.yellow(' Please try again or use API Key authentication instead.'));
+    await prompts.waitForEnter();
+    return await selectProviderOption(provider);
+  }
+
+  spinner.succeed(`Found ${models.length} models`);
+
+  // Let user select model from list
+  const selectedModel = await selectModelFromList(models, config.name);
+  if (!selectedModel) {
+    return await selectProviderOption(provider);
   }
 
   // Add agent
@@ -938,24 +945,7 @@ const setupRemoteOAuth = async (provider, config) => {
   return await aiAgentMenu();
 };
 
-/**
- * Prompt user to enter model name manually when API doesn't return models
- */
-const promptForModelName = async (providerName) => {
-  console.log();
-  console.log(chalk.yellow(' Could not fetch models from API.'));
-  console.log(chalk.white(' Please enter the model name manually.'));
-  console.log(chalk.gray(' (Check provider docs for available models)'));
-  console.log();
-
-  const modelName = await prompts.textInput(chalk.cyan('MODEL NAME:'));
-
-  if (!modelName || modelName.trim() === '' || modelName.trim() === '<') {
-    return null;
-  }
-
-  return modelName.trim();
-};
+// NOTE: promptForModelName was removed - models MUST come from API (RULES.md)
 
 /**
  * Setup OAuth via Manual Code Entry (unified flow for local and VPS)
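In 2.6.101 the remote-OAuth path no longer offers manual model entry: if the API returns nothing, the flow aborts back to the provider menu. Condensed, the new behaviour reads roughly as below (a sketch, not verbatim package code; spinner, chalk, prompts, selectModelFromList and selectProviderOption are helpers defined elsewhere in ai-agent.js, and provider/result/config come from the surrounding OAuth flow):

    // Sketch of the new fetch-or-fail model selection in setupRemoteOAuth (2.6.101)
    const { fetchModelsWithOAuth } = require('../services/ai/client');

    let models = [];
    let fetchError = null;
    try {
      models = await fetchModelsWithOAuth(provider.id, result.access);
    } catch (e) {
      fetchError = e.message;                 // keep the reason for the failure message
    }

    if (!models || models.length === 0) {
      spinner.fail('Could not fetch models from API');   // no hardcoded fallback anymore
      await prompts.waitForEnter();
      return await selectProviderOption(provider);       // back to the provider menu
    }

    spinner.succeed(`Found ${models.length} models`);
    const selectedModel = await selectModelFromList(models, config.name);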
@@ -1252,7 +1242,7 @@ const setupDeviceFlowOAuth = async (provider, config) => {
     return await selectProviderOption(provider);
   }
 
-  pollSpinner.text = 'FETCHING AVAILABLE MODELS...';
+  pollSpinner.text = 'FETCHING AVAILABLE MODELS FROM API...';
 
   // Store OAuth credentials
   const credentials = {
@@ -1264,13 +1254,36 @@ const setupDeviceFlowOAuth = async (provider, config) => {
     }
   };
 
-  //
-
+  // Fetch models from API - NO hardcoded fallback (RULES.md)
+  let models = [];
+  let fetchError = null;
+  try {
+    const { fetchModelsWithOAuth } = require('../services/ai/client');
+    models = await fetchModelsWithOAuth(provider.id, pollResult.access);
+  } catch (e) {
+    fetchError = e.message;
+  }
 
-
+  if (!models || models.length === 0) {
+    pollSpinner.fail('Could not fetch models from API');
+    console.log();
+    console.log(chalk.red(' ERROR: Unable to retrieve models from provider API'));
+    console.log(chalk.white(' Possible causes:'));
+    console.log(chalk.gray(' - OAuth token may not have permission to list models'));
+    console.log(chalk.gray(' - Network issue or API temporarily unavailable'));
+    if (fetchError) {
+      console.log(chalk.gray(` - Error: ${fetchError}`));
+    }
+    console.log();
+    console.log(chalk.yellow(' Please try again or use API Key authentication instead.'));
+    await prompts.waitForEnter();
+    return await selectProviderOption(provider);
+  }
 
-
-
+  pollSpinner.succeed(`Found ${models.length} models`);
+
+  // Let user select model from API list
+  const selectedModel = await selectModelFromList(models, config.name);
   if (!selectedModel) {
     return await selectProviderOption(provider);
   }
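The device-flow path (setupDeviceFlowOAuth) now repeats the same fetch-or-fail sequence, with pollSpinner and pollResult.access in place of spinner and result.access. Only the token source differs; the shared shape of the duplicated logic is roughly the following (fetchModelsOrFail is a hypothetical name used purely for illustration, not a function in the package):

    // Hypothetical helper showing the logic duplicated in both OAuth flows
    const fetchModelsOrFail = async (providerId, accessToken) => {
      const { fetchModelsWithOAuth } = require('../services/ai/client');
      try {
        const models = await fetchModelsWithOAuth(providerId, accessToken);
        if (models && models.length > 0) return { models, error: null };
        return { models: null, error: 'API returned an empty model list' };
      } catch (e) {
        return { models: null, error: e.message };
      }
    };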
package/src/services/ai/client.js
CHANGED

@@ -525,15 +525,17 @@ const fetchAnthropicModels = async (apiKey) => {
 
 /**
  * Fetch available models from Anthropic API (OAuth auth)
+ *
  * @param {string} accessToken - OAuth access token
- * @returns {Promise<Array|null>} Array of model IDs
+ * @returns {Promise<Array|null>} Array of model IDs from API, null if unavailable
  *
  * Data source: https://api.anthropic.com/v1/models (GET with Bearer token)
+ * NO HARDCODED FALLBACK - models must come from API only
  */
 const fetchAnthropicModelsOAuth = async (accessToken) => {
   if (!accessToken) return null;
 
-  const
+  const modelsUrl = 'https://api.anthropic.com/v1/models';
 
   const headers = {
     'Authorization': `Bearer ${accessToken}`,
@@ -542,14 +544,16 @@ const fetchAnthropicModelsOAuth = async (accessToken) => {
   };
 
   try {
-    const response = await makeRequest(
+    const response = await makeRequest(modelsUrl, { method: 'GET', headers, timeout: 15000 });
     if (response.data && Array.isArray(response.data)) {
       const models = response.data.map(m => m.id).filter(Boolean);
       if (models.length > 0) return models;
     }
     return null;
   } catch (error) {
-
+    if (process.env.HQX_DEBUG) {
+      console.error('[DEBUG] fetchAnthropicModelsOAuth error:', error.message);
+    }
     return null;
   }
 };
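fetchAnthropicModelsOAuth now builds the request from an explicit modelsUrl and parses response.data as an array of model records, keeping only each entry's id; errors are logged only when HQX_DEBUG is set and otherwise the function returns null. The parsing assumes a payload of roughly this shape (placeholder values for illustration, not captured API output):

    // Illustrative response shape assumed by the parsing in fetchAnthropicModelsOAuth
    const response = {
      data: [
        { id: 'claude-example-model-a' },   // real responses carry more fields per model
        { id: 'claude-example-model-b' }
      ]
    };
    const models = response.data.map(m => m.id).filter(Boolean);
    // models -> ['claude-example-model-a', 'claude-example-model-b']

Because every error path returns null rather than a default list, callers such as fetchModelsWithOAuth treat null or empty as "no models available" and surface the fetch-or-fail UI shown in ai-agent.js above.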
@@ -661,6 +665,7 @@ const fetchModelsWithOAuth = async (providerId, accessToken) => {
 
     case 'openai': {
       // Try OpenAI /v1/models endpoint with OAuth token
+      // NO HARDCODED FALLBACK - models must come from API only
       const openaiModels = await fetchOpenAIModels('https://api.openai.com/v1', accessToken);
       if (openaiModels && openaiModels.length > 0) {
         return openaiModels;
@@ -684,7 +689,9 @@ const fetchModelsWithOAuth = async (providerId, accessToken) => {
           .filter(Boolean);
       }
     } catch (e) {
-
+      if (process.env.HQX_DEBUG) {
+        console.error('[DEBUG] ChatGPT backend error:', e.message);
+      }
     }
 
     return null;
@@ -692,21 +699,30 @@ const fetchModelsWithOAuth = async (providerId, accessToken) => {
 
     case 'gemini': {
       // Gemini OAuth - fetch from Generative Language API
-
-
-      '
-
-
-
-
-
-
-
-
-
-
+      // NO HARDCODED FALLBACK - models must come from API only
+      try {
+        const geminiUrl = 'https://generativelanguage.googleapis.com/v1/models';
+        const geminiHeaders = {
+          'Authorization': `Bearer ${accessToken}`
+        };
+        const geminiResponse = await makeRequest(geminiUrl, {
+          method: 'GET',
+          headers: geminiHeaders,
+          timeout: 15000
+        });
+        if (geminiResponse.models && Array.isArray(geminiResponse.models)) {
+          const models = geminiResponse.models
+            .filter(m => m.supportedGenerationMethods?.includes('generateContent'))
+            .map(m => m.name.replace('models/', ''))
+            .filter(Boolean);
+          if (models.length > 0) return models;
+        }
+      } catch (e) {
+        if (process.env.HQX_DEBUG) {
+          console.error('[DEBUG] Gemini models API error:', e.message);
+        }
       }
+
       return null;
     }
 
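The rewritten 'gemini' case expects the Generative Language API models listing to return an object with a models array, where each entry carries a fully qualified name and a supportedGenerationMethods list; only models that support generateContent are kept, and the models/ prefix is stripped. An illustrative payload (placeholder names, not captured API output):

    // Illustrative response shape assumed by the new 'gemini' case
    const geminiResponse = {
      models: [
        { name: 'models/gemini-example-pro', supportedGenerationMethods: ['generateContent'] },
        { name: 'models/embedding-example', supportedGenerationMethods: ['embedContent'] }  // filtered out
      ]
    };
    const models = geminiResponse.models
      .filter(m => m.supportedGenerationMethods?.includes('generateContent'))
      .map(m => m.name.replace('models/', ''))
      .filter(Boolean);
    // models -> ['gemini-example-pro']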