hedgequantx 2.6.122 → 2.6.124

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "hedgequantx",
-  "version": "2.6.122",
+  "version": "2.6.124",
   "description": "HedgeQuantX - Prop Futures Trading CLI",
   "main": "src/app.js",
   "bin": {
@@ -1106,48 +1106,31 @@ const setupProxyOAuth = async (provider, config) => {

   // Wrap entire post-auth flow in try-catch to catch any uncaught errors
   try {
-    // Step 5: Fetch models from CLIProxyAPI
+    // Step 5: Fetch models from CLIProxyAPI (filtered by provider)
     const modelSpinner = ora({ text: 'Fetching available models from API...', color: 'cyan' }).start();

     let models = [];
     try {
-      models = await proxyManager.getModels();
-      modelSpinner.succeed(`Found ${models.length} models`);
+      // Get models filtered by provider using owned_by field from API
+      models = await proxyManager.getModels(provider.id);
+      modelSpinner.succeed(`Found ${models.length} models for ${provider.name}`);
     } catch (error) {
       modelSpinner.fail(`Failed to fetch models: ${error.message}`);
       await prompts.waitForEnter();
       return await selectProviderOption(provider);
     }

-    // Filter models for this provider
-    const providerPrefixes = {
-      anthropic: ['claude'],
-      openai: ['gpt', 'o1', 'o3', 'o4'],
-      gemini: ['gemini'],
-      qwen: ['qwen'],
-      iflow: ['deepseek', 'kimi', 'glm']
-    };
-
-    const prefixes = providerPrefixes[provider.id] || [];
-    const filteredModels = models.filter(m => {
-      const modelLower = m.toLowerCase();
-      return prefixes.some(p => modelLower.includes(p));
-    });
-
-    // Use filtered models if available, otherwise use all models
-    const availableModels = filteredModels.length > 0 ? filteredModels : models;
-
-    if (!availableModels || availableModels.length === 0) {
+    if (!models || models.length === 0) {
       console.log();
       console.log(chalk.red(' ERROR: No models found for this provider'));
-      console.log(chalk.gray(' Make sure your subscription is active.'));
+      console.log(chalk.gray(' Make sure your subscription is active and authorization completed.'));
       console.log();
       await prompts.waitForEnter();
       return await selectProviderOption(provider);
     }

     // Step 6: Let user select model
-    const selectedModel = await selectModelFromList(availableModels, config.name);
+    const selectedModel = await selectModelFromList(models, config.name);
     if (!selectedModel) {
       return await selectProviderOption(provider);
     }
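
As a sketch of what this hunk changes in practice: the removed block matched model ids against hard-coded name prefixes per provider, while the new call defers to the owned_by field returned by the proxy (see the getModels hunk below). With purely hypothetical model entries, the two approaches can classify the same list differently:

// Hypothetical model list; ids and owned_by values are placeholders.
const models = [
  { id: 'gpt-example', owned_by: 'openai' },
  { id: 'gpt-oss-example', owned_by: 'iflow' },  // id matches an 'openai' prefix but is owned elsewhere
  { id: 'gemini-example', owned_by: 'google' }
];

// Removed approach: substring match against hard-coded per-provider prefixes.
const openaiPrefixes = ['gpt', 'o1', 'o3', 'o4'];
const byPrefix = models
  .map(m => m.id)
  .filter(id => openaiPrefixes.some(p => id.toLowerCase().includes(p)));

// New approach: defer to the owned_by field reported by the API.
const byOwner = models
  .filter(m => m.owned_by && m.owned_by.toLowerCase() === 'openai')
  .map(m => m.id);

console.log(byPrefix); // [ 'gpt-example', 'gpt-oss-example' ]
console.log(byOwner);  // [ 'gpt-example' ]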
@@ -517,9 +517,10 @@ const submitCallback = async (callbackUrl, provider = 'anthropic') => {

   // Each provider has its own OAuth callback port and path in CLIProxyAPI
   // We need to submit the callback to the correct port
+  // Ports are determined by the redirect_uri in the OAuth URL from CLIProxyAPI
   const providerConfig = {
     anthropic: { port: 54545, path: '/callback' },
-    openai: { port: 16168, path: '/callback' },
+    openai: { port: 1455, path: '/auth/callback' },
     gemini: { port: 8085, path: '/oauth2callback' },
     qwen: { port: 8087, path: '/oauth2callback' },
     iflow: { port: 8088, path: '/callback' }
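
The body of submitCallback is not part of this diff, so the following is only a sketch, under the assumption that the captured OAuth redirect is forwarded to the local listener CLIProxyAPI opens for the selected provider; the helper name forwardCallback and the use of Node's global fetch are illustrative, not taken from the package:

// Sketch only: forward a captured OAuth callback URL to the provider-specific
// local port/path from the table above (assumed behaviour).
const forwardCallback = async (callbackUrl, provider = 'anthropic') => {
  const providerConfig = {
    anthropic: { port: 54545, path: '/callback' },
    openai: { port: 1455, path: '/auth/callback' },
    gemini: { port: 8085, path: '/oauth2callback' },
    qwen: { port: 8087, path: '/oauth2callback' },
    iflow: { port: 8088, path: '/callback' }
  };
  const { port, path } = providerConfig[provider] || providerConfig.anthropic;
  // Re-use the query string (code, state, ...) from the provider's redirect.
  const query = new URL(callbackUrl).search;
  const response = await fetch(`http://127.0.0.1:${port}${path}${query}`);
  return response.ok;
};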
@@ -601,15 +602,32 @@ const waitForAuth = async (state, timeoutMs = 300000, onStatus = () => {}) => {

 /**
  * Get available models from the proxy
+ * @param {string} [provider] - Optional provider to filter by (anthropic, openai, google, etc.)
  * @returns {Promise<Array<string>>}
  */
-const getModels = async () => {
+const getModels = async (provider = null) => {
   await ensureRunning();

   const response = await proxyRequest('GET', '/v1/models');

   if (response.data && Array.isArray(response.data)) {
-    return response.data.map(m => m.id || m).filter(Boolean);
+    let models = response.data;
+
+    // Filter by provider if specified (using owned_by field from API)
+    if (provider) {
+      // Map our provider IDs to the owned_by values from the API
+      const ownerMap = {
+        anthropic: 'anthropic',
+        openai: 'openai',
+        gemini: 'google',
+        qwen: 'qwen',
+        iflow: 'iflow'
+      };
+      const owner = ownerMap[provider] || provider;
+      models = models.filter(m => m.owned_by && m.owned_by.toLowerCase() === owner.toLowerCase());
+    }
+
+    return models.map(m => m.id || m).filter(Boolean);
   }

   return [];
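
To make the new filter concrete, here is a self-contained sketch. The payload shape is an assumption (an OpenAI-style model list where each entry carries an owned_by owner) and the model ids are placeholders; the ownerMap translation is taken from the hunk above and is what turns the CLI's provider id 'gemini' into the API's owner string 'google' before comparing:

// Assumed OpenAI-style payload from GET /v1/models; ids are placeholders.
const response = {
  data: [
    { id: 'claude-example', owned_by: 'anthropic' },
    { id: 'gemini-example', owned_by: 'google' },
    { id: 'qwen-example', owned_by: 'qwen' }
  ]
};

// Same provider-id -> owned_by translation as in the hunk above.
const ownerMap = { anthropic: 'anthropic', openai: 'openai', gemini: 'google', qwen: 'qwen', iflow: 'iflow' };

const listModels = (provider = null) => {
  let models = response.data;
  if (provider) {
    const owner = ownerMap[provider] || provider;
    models = models.filter(m => m.owned_by && m.owned_by.toLowerCase() === owner.toLowerCase());
  }
  return models.map(m => m.id || m).filter(Boolean);
};

console.log(listModels('gemini')); // [ 'gemini-example' ]
console.log(listModels());         // all three placeholder ids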