hedgequantx 2.6.93 → 2.6.94

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "hedgequantx",
-  "version": "2.6.93",
+  "version": "2.6.94",
   "description": "HedgeQuantX - Prop Futures Trading CLI",
   "main": "src/app.js",
   "bin": {
@@ -592,9 +592,16 @@ const fetchOpenAIModels = async (endpoint, apiKey) => {
 
 /**
  * Fetch available models for OAuth-authenticated providers
+ * Uses multiple API endpoints to discover available models
+ *
  * @param {string} providerId - Provider ID (anthropic, openai, gemini, etc.)
  * @param {string} accessToken - OAuth access token
- * @returns {Promise<Array|null>} Array of model IDs or null on error
+ * @returns {Promise<Array|null>} Array of model IDs from API, null if unavailable
+ *
+ * Data sources:
+ * - OpenAI: https://api.openai.com/v1/models (GET)
+ * - Anthropic: https://api.anthropic.com/v1/models (GET)
+ * - Gemini: https://generativelanguage.googleapis.com/v1/models (GET)
  */
 const fetchModelsWithOAuth = async (providerId, accessToken) => {
   if (!accessToken) return null;
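
Aside: the three endpoints listed in the new doc comment are plain authenticated GET calls. As a rough sketch (not the package's own code, which goes through its makeRequest helper with a 10s timeout), the OpenAI listing can be reproduced with Node 18+'s built-in fetch; the { object: 'list', data: [{ id, ... }] } response shape is the documented one, and everything else here is deliberately minimal:

// Sketch only - assumes Node 18+ (global fetch) and a token that is allowed
// to list models; this is not the package's implementation.
const listOpenAIModels = async (token) => {
  const res = await fetch('https://api.openai.com/v1/models', {
    headers: { 'Authorization': `Bearer ${token}` }
  });
  if (!res.ok) return null;          // e.g. 401 for an expired or mis-scoped token
  const body = await res.json();     // { object: 'list', data: [{ id, ... }] }
  return (body.data || []).map(m => m.id);
};

The Anthropic and Gemini listings differ mainly in base URL, auth headers, and the name of the array field in the response (data vs. models).
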
@@ -604,17 +611,48 @@ const fetchModelsWithOAuth = async (providerId, accessToken) => {
     case 'anthropic':
       return await fetchAnthropicModelsOAuth(accessToken);
 
-    case 'openai':
-      // OpenAI OAuth uses the same endpoint as API key
-      return await fetchOpenAIModels('https://api.openai.com/v1', accessToken);
+    case 'openai': {
+      // Try OpenAI /v1/models endpoint with OAuth token
+      const openaiModels = await fetchOpenAIModels('https://api.openai.com/v1', accessToken);
+      if (openaiModels && openaiModels.length > 0) {
+        return openaiModels;
+      }
+
+      // Try alternative: ChatGPT backend API (for Plus/Pro plans)
+      try {
+        const chatgptUrl = 'https://chatgpt.com/backend-api/models';
+        const chatgptHeaders = {
+          'Authorization': `Bearer ${accessToken}`,
+          'Content-Type': 'application/json'
+        };
+        const chatgptResponse = await makeRequest(chatgptUrl, {
+          method: 'GET',
+          headers: chatgptHeaders,
+          timeout: 10000
+        });
+        if (chatgptResponse.models && Array.isArray(chatgptResponse.models)) {
+          return chatgptResponse.models
+            .map(m => m.slug || m.id || m.name)
+            .filter(Boolean);
+        }
+      } catch (e) {
+        // ChatGPT backend not available
+      }
+
+      return null;
+    }
 
-    case 'gemini':
-      // Gemini OAuth - try to fetch from API
+    case 'gemini': {
+      // Gemini OAuth - fetch from Generative Language API
       const geminiUrl = 'https://generativelanguage.googleapis.com/v1/models';
       const geminiHeaders = {
         'Authorization': `Bearer ${accessToken}`
       };
-      const geminiResponse = await makeRequest(geminiUrl, { method: 'GET', headers: geminiHeaders, timeout: 10000 });
+      const geminiResponse = await makeRequest(geminiUrl, {
+        method: 'GET',
+        headers: geminiHeaders,
+        timeout: 10000
+      });
       if (geminiResponse.models && Array.isArray(geminiResponse.models)) {
         return geminiResponse.models
           .filter(m => m.supportedGenerationMethods?.includes('generateContent'))
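
Aside: chatgpt.com/backend-api/models is not a publicly documented endpoint, so the response shape the new 'openai' fallback expects ({ models: [...] } with slug/id/name on each entry) is best read as an assumption that could change. The mapping itself is easy to exercise in isolation against a made-up payload:

// Illustrative only - these entries are hypothetical and exist purely to show
// how the slug/id/name fallback chain and filter(Boolean) behave.
const sampleChatGptBody = {
  models: [
    { slug: 'gpt-4o', title: 'GPT-4o' },   // slug wins when present
    { id: 'o1-preview' },                  // falls back to id
    { name: 'gpt-4o-mini' },               // falls back to name
    { title: 'no usable field' }           // becomes undefined, then dropped
  ]
};

const ids = sampleChatGptBody.models
  .map(m => m.slug || m.id || m.name)
  .filter(Boolean);

console.log(ids); // ['gpt-4o', 'o1-preview', 'gpt-4o-mini']
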
@@ -622,6 +660,55 @@ const fetchModelsWithOAuth = async (providerId, accessToken) => {
           .filter(Boolean);
       }
       return null;
+    }
+
+    case 'qwen': {
+      // Qwen - try to fetch models from Alibaba Cloud API
+      try {
+        const qwenUrl = 'https://dashscope.aliyuncs.com/api/v1/models';
+        const qwenHeaders = {
+          'Authorization': `Bearer ${accessToken}`,
+          'Content-Type': 'application/json'
+        };
+        const qwenResponse = await makeRequest(qwenUrl, {
+          method: 'GET',
+          headers: qwenHeaders,
+          timeout: 10000
+        });
+        if (qwenResponse.data && Array.isArray(qwenResponse.data)) {
+          return qwenResponse.data
+            .map(m => m.id || m.model_id)
+            .filter(Boolean);
+        }
+      } catch (e) {
+        // Qwen API may not support model listing
+      }
+      return null;
+    }
+
+    case 'iflow': {
+      // iFlow - fetch models from iFlow API
+      try {
+        const iflowUrl = 'https://apis.iflow.cn/v1/models';
+        const iflowHeaders = {
+          'Authorization': `Bearer ${accessToken}`,
+          'Content-Type': 'application/json'
+        };
+        const iflowResponse = await makeRequest(iflowUrl, {
+          method: 'GET',
+          headers: iflowHeaders,
+          timeout: 10000
+        });
+        if (iflowResponse.data && Array.isArray(iflowResponse.data)) {
+          return iflowResponse.data
+            .map(m => m.id)
+            .filter(Boolean);
+        }
+      } catch (e) {
+        // iFlow API may not support model listing
+      }
+      return null;
+    }
 
     default:
       return null;
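
Aside: the new 'qwen' and 'iflow' branches both expect an OpenAI-style list body ({ data: [...] }) from dashscope.aliyuncs.com and apis.iflow.cn respectively, and both swallow request errors, so a provider without a model-listing API simply yields null and leaves the caller to fall back to its own defaults. The extraction step can be checked against a made-up payload (model names below are hypothetical):

// Illustrative only - demonstrates the id / model_id extraction used above.
const sampleListBody = {
  data: [
    { id: 'qwen-max' },          // picked up via m.id by both branches
    { model_id: 'qwen-plus' },   // only the qwen branch also accepts model_id
    { label: 'no id at all' }    // maps to undefined, removed by filter(Boolean)
  ]
};

const qwenIds = sampleListBody.data.map(m => m.id || m.model_id).filter(Boolean);
const iflowIds = sampleListBody.data.map(m => m.id).filter(Boolean);

console.log(qwenIds);  // ['qwen-max', 'qwen-plus']
console.log(iflowIds); // ['qwen-max']

Wrapping each lookup in try/catch keeps a provider that lacks a model-listing endpoint from breaking provider selection entirely.
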