hedgequantx 2.6.92 → 2.6.94

package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "hedgequantx",
-  "version": "2.6.92",
+  "version": "2.6.94",
   "description": "HedgeQuantX - Prop Futures Trading CLI",
   "main": "src/app.js",
   "bin": {
@@ -954,29 +954,14 @@ const setupBrowserOAuth = async (provider, config) => {
     // Fallback to default models if fetch fails
   }
 
-  // Fallback default models if API doesn't return list
-  // Updated January 2026 with latest models
-  const defaultModels = {
-    anthropic: ['claude-sonnet-4-20250514', 'claude-opus-4-20250514', 'claude-3-5-sonnet-20241022', 'claude-3-5-haiku-20241022'],
-    openai: [
-      'gpt-4.5-preview', // NEW: GPT-4.5 (latest)
-      'gpt-4.5', // NEW: GPT-4.5
-      'o3', // NEW: o3 (latest reasoning)
-      'o3-mini', // o3-mini
-      'o1', // o1
-      'o1-mini', // o1-mini
-      'gpt-4o', // GPT-4o
-      'gpt-4o-mini', // GPT-4o mini
-      'gpt-4-turbo', // GPT-4 Turbo
-    ],
-    gemini: ['gemini-2.5-pro', 'gemini-2.5-flash', 'gemini-2.0-flash', 'gemini-1.5-pro'],
-    iflow: ['deepseek-v3', 'deepseek-chat', 'kimi', 'glm-4'],
-    qwen: ['qwen-max', 'qwen-plus', 'qwen-turbo', 'qwen2.5-72b-instruct']
-  };
-
+  // NO hardcoded fallback - models MUST come from API
+  // Rule: ZERO fake/mock data - API only
   if (!models || models.length === 0) {
-    models = defaultModels[provider.id] || ['default'];
-    spinner.warn('USING DEFAULT MODEL LIST');
+    spinner.fail('NO MODELS AVAILABLE FROM API');
+    console.log(chalk.red('\n Could not fetch models from provider API'));
+    console.log(chalk.gray(' Please check your OAuth credentials or try again'));
+    await prompts.waitForEnter();
+    return await selectProviderOption(provider);
   } else {
     spinner.succeed(`FOUND ${models.length} MODELS`);
   }
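The replacement failure path leans on collaborators that sit outside this hunk (`spinner`, `chalk`, `prompts.waitForEnter`, `selectProviderOption`). A minimal sketch of the same no-fallback control flow, with those collaborators stubbed as plain console calls; every name below is a placeholder, not a hedgequantx export, and the single retry is a simplification of the diff's re-entry into provider selection:

```js
// Sketch only: stand-ins for the CLI's spinner/chalk/prompts collaborators.
const promptRetry = async () => false; // stand-in for prompts.waitForEnter()

const resolveModels = async (fetchModels) => {
  const models = await fetchModels();
  if (!models || models.length === 0) {
    console.error('NO MODELS AVAILABLE FROM API');            // spinner.fail(...)
    console.error('Check your OAuth credentials or try again');
    if (await promptRetry()) return resolveModels(fetchModels); // re-enter selection
    return null; // surface the failure; no hardcoded model list
  }
  console.log(`FOUND ${models.length} MODELS`);               // spinner.succeed(...)
  return models;
};

// An empty API response now yields null instead of a default list:
resolveModels(async () => []).then((m) => console.log(m)); // -> null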
@@ -541,6 +541,14 @@ const fetchGeminiModels = async (apiKey) => {
  *
  * Data source: {endpoint}/models (GET)
  */
+/**
+ * Fetch available models from OpenAI-compatible API
+ * @param {string} endpoint - API endpoint base URL
+ * @param {string} apiKey - API key or OAuth token
+ * @returns {Promise<Array|null>} Array of model IDs from API, null if unavailable
+ *
+ * Data source: {endpoint}/models (GET)
+ */
 const fetchOpenAIModels = async (endpoint, apiKey) => {
   if (!endpoint) return null;
 
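The added JSDoc pins the contract to `Promise<Array|null>`. A hedged caller sketch against that contract, assuming `fetchOpenAIModels` is in scope; the endpoint is the public OpenAI base URL and `OPENAI_API_KEY` is a conventional environment variable, not something this package documents:

```js
// Hypothetical caller honoring the documented Array-or-null contract.
const printModels = async () => {
  const models = await fetchOpenAIModels(
    'https://api.openai.com/v1',
    process.env.OPENAI_API_KEY
  );
  if (models === null) {
    console.error('Model list unavailable; check endpoint and key');
    return;
  }
  models.forEach((id) => console.log(id));
};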
@@ -554,44 +562,27 @@ const fetchOpenAIModels = async (endpoint, apiKey) => {
     headers['Authorization'] = `Bearer ${apiKey}`;
   }
 
-  // Priority order for OpenAI models (newest/best first)
-  const modelPriority = [
-    'gpt-4.5-preview', 'gpt-4.5',
-    'o3', 'o3-mini', 'o3-mini-high',
-    'o1', 'o1-pro', 'o1-mini',
-    'gpt-4o', 'gpt-4o-mini', 'gpt-4o-audio-preview',
-    'gpt-4-turbo', 'gpt-4-turbo-preview',
-    'gpt-4', 'gpt-3.5-turbo'
-  ];
-
   try {
     const response = await makeRequest(url, { method: 'GET', headers, timeout: 10000 });
     if (response.data && Array.isArray(response.data)) {
-      const allModels = response.data.map(m => m.id).filter(Boolean);
+      // Return models from API - filter to chat models only
+      const chatModels = response.data
+        .map(m => m.id)
+        .filter(id => id && (
+          id.includes('gpt') ||
+          id.includes('o1') ||
+          id.includes('o3') ||
+          id.includes('claude') ||
+          id.includes('gemini')
+        ))
+        .filter(id =>
+          !id.includes('embedding') &&
+          !id.includes('whisper') &&
+          !id.includes('tts') &&
+          !id.includes('dall-e')
+        );
 
-      // Filter to only chat/completion models, exclude embeddings/audio-only etc
-      const chatModels = allModels.filter(m =>
-        (m.includes('gpt') || m.includes('o1') || m.includes('o3')) &&
-        !m.includes('embedding') &&
-        !m.includes('whisper') &&
-        !m.includes('tts') &&
-        !m.includes('dall-e') &&
-        !m.includes('davinci') &&
-        !m.includes('babbage') &&
-        !m.includes('instruct')
-      );
-
-      // Sort by priority
-      chatModels.sort((a, b) => {
-        const aIdx = modelPriority.findIndex(p => a.includes(p));
-        const bIdx = modelPriority.findIndex(p => b.includes(p));
-        if (aIdx === -1 && bIdx === -1) return 0;
-        if (aIdx === -1) return 1;
-        if (bIdx === -1) return -1;
-        return aIdx - bIdx;
-      });
-
-      return chatModels.length > 0 ? chatModels : allModels;
+      return chatModels.length > 0 ? chatModels : null;
     }
     return null;
   } catch (error) {
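The new include/exclude chain is pure string matching, so it can be exercised in isolation. A sketch against a made-up `/v1/models`-shaped payload; the IDs are illustrative, not a claim about what any API actually returns:

```js
// The 2.6.94 filter chain, re-run against a fabricated payload.
const response = { data: [
  { id: 'gpt-4o' }, { id: 'o3-mini' }, { id: 'claude-3-5-sonnet' },
  { id: 'text-embedding-3-small' }, { id: 'whisper-1' }, { id: 'dall-e-3' },
] };

const chatModels = response.data
  .map((m) => m.id)
  .filter((id) => id && (
    id.includes('gpt') || id.includes('o1') || id.includes('o3') ||
    id.includes('claude') || id.includes('gemini')
  ))
  .filter((id) =>
    !id.includes('embedding') && !id.includes('whisper') &&
    !id.includes('tts') && !id.includes('dall-e')
  );

console.log(chatModels); // -> ['gpt-4o', 'o3-mini', 'claude-3-5-sonnet']
```

Note that the priority sort was removed, so ordering now follows whatever the endpoint returns, and an empty filtered result falls through to `null` rather than the unfiltered list.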
@@ -601,9 +592,16 @@ const fetchOpenAIModels = async (endpoint, apiKey) => {
 
 /**
  * Fetch available models for OAuth-authenticated providers
+ * Uses multiple API endpoints to discover available models
+ *
  * @param {string} providerId - Provider ID (anthropic, openai, gemini, etc.)
  * @param {string} accessToken - OAuth access token
- * @returns {Promise<Array|null>} Array of model IDs or null on error
+ * @returns {Promise<Array|null>} Array of model IDs from API, null if unavailable
+ *
+ * Data sources:
+ * - OpenAI: https://api.openai.com/v1/models (GET)
+ * - Anthropic: https://api.anthropic.com/v1/models (GET)
+ * - Gemini: https://generativelanguage.googleapis.com/v1/models (GET)
  */
 const fetchModelsWithOAuth = async (providerId, accessToken) => {
   if (!accessToken) return null;
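A hedged usage sketch of the documented contract, assuming `fetchModelsWithOAuth` from the hunk above is in scope; token acquisition is out of scope here, so `OAUTH_ACCESS_TOKEN` is a placeholder variable, not a configuration key this package defines:

```js
// Resolve the model list for one provider, handling the null case.
(async () => {
  const models = await fetchModelsWithOAuth('gemini', process.env.OAUTH_ACCESS_TOKEN);
  if (models) {
    console.log(`FOUND ${models.length} MODELS`);
  } else {
    console.log('No models returned (null)');
  }
})();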
@@ -613,17 +611,48 @@ const fetchModelsWithOAuth = async (providerId, accessToken) => {
     case 'anthropic':
       return await fetchAnthropicModelsOAuth(accessToken);
 
-    case 'openai':
-      // OpenAI OAuth uses the same endpoint as API key
-      return await fetchOpenAIModels('https://api.openai.com/v1', accessToken);
+    case 'openai': {
+      // Try OpenAI /v1/models endpoint with OAuth token
+      const openaiModels = await fetchOpenAIModels('https://api.openai.com/v1', accessToken);
+      if (openaiModels && openaiModels.length > 0) {
+        return openaiModels;
+      }
+
+      // Try alternative: ChatGPT backend API (for Plus/Pro plans)
+      try {
+        const chatgptUrl = 'https://chatgpt.com/backend-api/models';
+        const chatgptHeaders = {
+          'Authorization': `Bearer ${accessToken}`,
+          'Content-Type': 'application/json'
+        };
+        const chatgptResponse = await makeRequest(chatgptUrl, {
+          method: 'GET',
+          headers: chatgptHeaders,
+          timeout: 10000
+        });
+        if (chatgptResponse.models && Array.isArray(chatgptResponse.models)) {
+          return chatgptResponse.models
+            .map(m => m.slug || m.id || m.name)
+            .filter(Boolean);
+        }
+      } catch (e) {
+        // ChatGPT backend not available
+      }
+
+      return null;
+    }
 
-    case 'gemini':
-      // Gemini OAuth - try to fetch from API
+    case 'gemini': {
+      // Gemini OAuth - fetch from Generative Language API
       const geminiUrl = 'https://generativelanguage.googleapis.com/v1/models';
       const geminiHeaders = {
         'Authorization': `Bearer ${accessToken}`
       };
-      const geminiResponse = await makeRequest(geminiUrl, { method: 'GET', headers: geminiHeaders, timeout: 10000 });
+      const geminiResponse = await makeRequest(geminiUrl, {
+        method: 'GET',
+        headers: geminiHeaders,
+        timeout: 10000
+      });
       if (geminiResponse.models && Array.isArray(geminiResponse.models)) {
         return geminiResponse.models
           .filter(m => m.supportedGenerationMethods?.includes('generateContent'))
@@ -631,6 +660,55 @@ const fetchModelsWithOAuth = async (providerId, accessToken) => {
           .filter(Boolean);
       }
       return null;
+    }
+
+    case 'qwen': {
+      // Qwen - try to fetch models from Alibaba Cloud API
+      try {
+        const qwenUrl = 'https://dashscope.aliyuncs.com/api/v1/models';
+        const qwenHeaders = {
+          'Authorization': `Bearer ${accessToken}`,
+          'Content-Type': 'application/json'
+        };
+        const qwenResponse = await makeRequest(qwenUrl, {
+          method: 'GET',
+          headers: qwenHeaders,
+          timeout: 10000
+        });
+        if (qwenResponse.data && Array.isArray(qwenResponse.data)) {
+          return qwenResponse.data
+            .map(m => m.id || m.model_id)
+            .filter(Boolean);
+        }
+      } catch (e) {
+        // Qwen API may not support model listing
+      }
+      return null;
+    }
+
+    case 'iflow': {
+      // iFlow - fetch models from iFlow API
+      try {
+        const iflowUrl = 'https://apis.iflow.cn/v1/models';
+        const iflowHeaders = {
+          'Authorization': `Bearer ${accessToken}`,
+          'Content-Type': 'application/json'
+        };
+        const iflowResponse = await makeRequest(iflowUrl, {
+          method: 'GET',
+          headers: iflowHeaders,
+          timeout: 10000
+        });
+        if (iflowResponse.data && Array.isArray(iflowResponse.data)) {
+          return iflowResponse.data
+            .map(m => m.id)
+            .filter(Boolean);
+        }
+      } catch (e) {
+        // iFlow API may not support model listing
+      }
+      return null;
+    }
 
     default:
       return null;
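The `qwen` and `iflow` branches share one shape: GET a models endpoint with a bearer token, map IDs out of `data`, and swallow errors into `null`. A sketch of a table-driven equivalent under the diff's own endpoints; `listModelsFrom` and `MODEL_LIST_ENDPOINTS` are hypothetical names, and `makeRequest` is assumed to be the same helper the diff calls:

```js
// Hypothetical consolidation of the qwen/iflow branches above.
const MODEL_LIST_ENDPOINTS = {
  qwen:  { url: 'https://dashscope.aliyuncs.com/api/v1/models', pick: (m) => m.id || m.model_id },
  iflow: { url: 'https://apis.iflow.cn/v1/models',              pick: (m) => m.id },
};

const listModelsFrom = async (providerId, accessToken) => {
  const spec = MODEL_LIST_ENDPOINTS[providerId];
  if (!spec) return null;
  try {
    const res = await makeRequest(spec.url, {
      method: 'GET',
      headers: {
        'Authorization': `Bearer ${accessToken}`,
        'Content-Type': 'application/json'
      },
      timeout: 10000
    });
    return Array.isArray(res.data) ? res.data.map(spec.pick).filter(Boolean) : null;
  } catch (e) {
    return null; // mirrors the diff: listing may simply be unsupported
  }
};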