hedgequantx 2.8.4 → 2.9.0

package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "hedgequantx",
-   "version": "2.8.4",
+   "version": "2.9.0",
    "description": "HedgeQuantX - Prop Futures Trading CLI",
    "main": "src/app.js",
    "bin": {
@@ -239,7 +239,8 @@ const drawModelsTable = (provider, models, boxWidth) => {
 
  /**
   * Draw provider configuration window
-  * @param {Object} provider - Provider object
+  * Shows connection options based on provider capabilities (OAuth and/or API Key)
+  * @param {Object} provider - Provider object with supportsOAuth and supportsApiKey flags
   * @param {Object} config - Current config
   * @param {number} boxWidth - Box width
   */
@@ -249,32 +250,47 @@ const drawProviderWindow = (provider, config, boxWidth) => {
    const col2Width = W - col1Width;
    const providerConfig = config.providers[provider.id] || {};
 
+   // Check provider capabilities (default to both if not specified)
+   const supportsOAuth = provider.supportsOAuth !== false;
+   const supportsApiKey = provider.supportsApiKey !== false;
+
    // New rectangle (banner is always closed)
    console.log(chalk.cyan('╔' + '═'.repeat(W) + '╗'));
    console.log(chalk.cyan('║') + chalk[provider.color].bold(centerText(provider.name.toUpperCase(), W)) + chalk.cyan('║'));
    console.log(chalk.cyan('╠' + '═'.repeat(W) + '╣'));
 
-   // Options in 2 columns (centered)
-   const opt1 = '[1] CONNECT VIA PAID PLAN';
-   const opt2 = '[2] CONNECT VIA API KEY';
-
-   const left1 = chalk.green(opt1);
-   const right1 = chalk.yellow(opt2);
-   const left1Len = visibleLength(left1);
-   const right1Len = visibleLength(right1);
-   const left1PadTotal = col1Width - left1Len;
-   const left1PadL = Math.floor(left1PadTotal / 2);
-   const left1PadR = left1PadTotal - left1PadL;
-   const right1PadTotal = col2Width - right1Len;
-   const right1PadL = Math.floor(right1PadTotal / 2);
-   const right1PadR = right1PadTotal - right1PadL;
-
-   console.log(
-     chalk.cyan('║') +
-     ' '.repeat(left1PadL) + left1 + ' '.repeat(left1PadR) +
-     ' '.repeat(right1PadL) + right1 + ' '.repeat(right1PadR) +
-     chalk.cyan('║')
-   );
+   // Display connection options based on provider capabilities
+   if (supportsOAuth && supportsApiKey) {
+     // Both options: 2 columns
+     const opt1 = '[1] CONNECT VIA PAID PLAN';
+     const opt2 = '[2] CONNECT VIA API KEY';
+
+     const left1 = chalk.green(opt1);
+     const right1 = chalk.yellow(opt2);
+     const left1Len = visibleLength(left1);
+     const right1Len = visibleLength(right1);
+     const left1PadTotal = col1Width - left1Len;
+     const left1PadL = Math.floor(left1PadTotal / 2);
+     const left1PadR = left1PadTotal - left1PadL;
+     const right1PadTotal = col2Width - right1Len;
+     const right1PadL = Math.floor(right1PadTotal / 2);
+     const right1PadR = right1PadTotal - right1PadL;
+
+     console.log(
+       chalk.cyan('║') +
+       ' '.repeat(left1PadL) + left1 + ' '.repeat(left1PadR) +
+       ' '.repeat(right1PadL) + right1 + ' '.repeat(right1PadR) +
+       chalk.cyan('║')
+     );
+   } else if (supportsApiKey) {
+     // API Key only: centered single option
+     const opt = '[1] CONNECT VIA API KEY';
+     console.log(chalk.cyan('║') + chalk.yellow(centerText(opt, W)) + chalk.cyan('║'));
+   } else if (supportsOAuth) {
+     // OAuth only: centered single option
+     const opt = '[1] CONNECT VIA PAID PLAN';
+     console.log(chalk.cyan('║') + chalk.green(centerText(opt, W)) + chalk.cyan('║'));
+   }
 
    // Status bar
    console.log(chalk.cyan('╠' + '─'.repeat(W) + '╣'));
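Worth noting: the `!== false` checks make the flags opt-out, so a provider object that omits them still gets both connection options. A minimal standalone sketch of the resulting menu logic (the helper and provider objects here are hypothetical, not part of the package):

// Hypothetical helper mirroring the capability defaulting above
const menuFor = (provider) => {
  const oauth = provider.supportsOAuth !== false;   // undefined counts as true
  const apiKey = provider.supportsApiKey !== false; // undefined counts as true
  if (oauth && apiKey) return ['[1] CONNECT VIA PAID PLAN', '[2] CONNECT VIA API KEY'];
  if (apiKey) return ['[1] CONNECT VIA API KEY'];
  return oauth ? ['[1] CONNECT VIA PAID PLAN'] : [];
};

console.log(menuFor({ id: 'legacy' }));                         // both options (flags omitted)
console.log(menuFor({ id: 'deepseek', supportsOAuth: false })); // API key only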
@@ -1,9 +1,4 @@
- /**
-  * AI Agents Configuration Page
-  *
-  * Allows users to configure AI providers for trading strategies.
-  * Supports both CLIProxy (paid plans) and direct API keys.
-  */
+ /** AI Agents Configuration Page - CLIProxy (OAuth) + LLM Proxy (API Key) */
 
  const chalk = require('chalk');
  const os = require('os');
@@ -27,16 +22,24 @@ const clearWithBanner = () => {
  const CONFIG_DIR = path.join(os.homedir(), '.hqx');
  const CONFIG_FILE = path.join(CONFIG_DIR, 'ai-config.json');
 
- // AI Providers list
+ // AI Providers list with OAuth (paid plan) and API Key support
+ // CLIProxyAPI (port 8317): OAuth for Anthropic, OpenAI, Google, Qwen, iFlow
+ // LLM Proxy (port 8318): API Key for all providers via LiteLLM
  const AI_PROVIDERS = [
-   { id: 'anthropic', name: 'Anthropic (Claude)', color: 'magenta' },
-   { id: 'openai', name: 'OpenAI (GPT)', color: 'green' },
-   { id: 'google', name: 'Google (Gemini)', color: 'blue' },
-   { id: 'mistral', name: 'Mistral AI', color: 'yellow' },
-   { id: 'groq', name: 'Groq', color: 'cyan' },
-   { id: 'xai', name: 'xAI (Grok)', color: 'white' },
-   { id: 'perplexity', name: 'Perplexity', color: 'blue' },
-   { id: 'openrouter', name: 'OpenRouter', color: 'gray' },
+   // OAuth + API Key supported (can use paid plan OR API key)
+   { id: 'anthropic', name: 'Anthropic (Claude)', color: 'magenta', supportsOAuth: true, supportsApiKey: true },
+   { id: 'openai', name: 'OpenAI (GPT)', color: 'green', supportsOAuth: true, supportsApiKey: true },
+   { id: 'google', name: 'Google (Gemini)', color: 'blue', supportsOAuth: true, supportsApiKey: true },
+   { id: 'qwen', name: 'Qwen', color: 'cyan', supportsOAuth: true, supportsApiKey: true },
+   { id: 'iflow', name: 'iFlow (DeepSeek/GLM)', color: 'yellow', supportsOAuth: true, supportsApiKey: true },
+   // API Key only (no OAuth - uses LLM Proxy via LiteLLM)
+   { id: 'deepseek', name: 'DeepSeek', color: 'blue', supportsOAuth: false, supportsApiKey: true },
+   { id: 'minimax', name: 'MiniMax', color: 'magenta', supportsOAuth: false, supportsApiKey: true },
+   { id: 'mistral', name: 'Mistral AI', color: 'yellow', supportsOAuth: false, supportsApiKey: true },
+   { id: 'groq', name: 'Groq', color: 'cyan', supportsOAuth: false, supportsApiKey: true },
+   { id: 'xai', name: 'xAI (Grok)', color: 'white', supportsOAuth: false, supportsApiKey: true },
+   { id: 'perplexity', name: 'Perplexity', color: 'blue', supportsOAuth: false, supportsApiKey: true },
+   { id: 'openrouter', name: 'OpenRouter', color: 'gray', supportsOAuth: false, supportsApiKey: true },
  ];
 
  /** Load AI config from file */
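Given those flags, the two proxies split the list cleanly. A quick sketch of the partition, assuming the AI_PROVIDERS array above is in scope:

// OAuth-capable entries are served by CLIProxyAPI (port 8317),
// the rest by the LiteLLM-based LLM Proxy (port 8318)
const viaCliProxy = AI_PROVIDERS.filter(p => p.supportsOAuth !== false);
const viaLlmProxy = AI_PROVIDERS.filter(p => p.supportsOAuth === false);
console.log(viaCliProxy.map(p => p.id)); // anthropic, openai, google, qwen, iflow
console.log(viaLlmProxy.map(p => p.id)); // deepseek, minimax, mistral, groq, xai, perplexity, openrouter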
@@ -342,6 +345,10 @@ const handleApiKeyConnection = async (provider, config) => {
  const handleProviderConfig = async (provider, config) => {
    const boxWidth = getLogoWidth();
 
+   // Check provider capabilities
+   const supportsOAuth = provider.supportsOAuth !== false;
+   const supportsApiKey = provider.supportsApiKey !== false;
+
    while (true) {
      clearWithBanner();
      drawProviderWindow(provider, config, boxWidth);
@@ -360,11 +367,21 @@ const handleProviderConfig = async (provider, config) => {
      }
 
      if (choice === '1') {
-       await handleCliProxyConnection(provider, config, boxWidth);
+       if (supportsOAuth && supportsApiKey) {
+         // Both supported: [1] = OAuth via CLIProxy
+         await handleCliProxyConnection(provider, config, boxWidth);
+       } else if (supportsApiKey) {
+         // API Key only: [1] = API Key via LLM Proxy
+         await handleApiKeyConnection(provider, config);
+       } else if (supportsOAuth) {
+         // OAuth only: [1] = OAuth via CLIProxy
+         await handleCliProxyConnection(provider, config, boxWidth);
+       }
        continue;
      }
 
-     if (choice === '2') {
+     if (choice === '2' && supportsOAuth && supportsApiKey) {
+       // Only available when both are supported: [2] = API Key
        await handleApiKeyConnection(provider, config);
        continue;
      }
@@ -15,6 +15,8 @@ const API_ENDPOINTS = {
    anthropic: 'https://api.anthropic.com/v1/models',
    openai: 'https://api.openai.com/v1/models',
    google: 'https://generativelanguage.googleapis.com/v1beta/models', // v1beta for Gemini 3
+   deepseek: 'https://api.deepseek.com/v1/models',
+   minimax: 'https://api.minimax.chat/v1/models',
    mistral: 'https://api.mistral.ai/v1/models',
    groq: 'https://api.groq.com/openai/v1/models',
    xai: 'https://api.x.ai/v1/models',
@@ -85,15 +87,16 @@ const getAuthHeaders = (providerId, apiKey) => {
    case 'anthropic':
      return { 'x-api-key': apiKey, 'anthropic-version': '2023-06-01' };
    case 'openai':
+   case 'deepseek':
+   case 'minimax':
    case 'groq':
    case 'xai':
    case 'perplexity':
    case 'openrouter':
+   case 'mistral':
      return { 'Authorization': `Bearer ${apiKey}` };
    case 'google':
      return {}; // Google uses query param
-   case 'mistral':
-     return { 'Authorization': `Bearer ${apiKey}` };
    default:
      return { 'Authorization': `Bearer ${apiKey}` };
  }
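The change folds mistral into the shared Bearer branch without altering behavior. For example (the keys below are placeholders, not real credentials):

getAuthHeaders('anthropic', 'sk-ant-xxx'); // { 'x-api-key': 'sk-ant-xxx', 'anthropic-version': '2023-06-01' }
getAuthHeaders('deepseek', 'sk-xxx');      // { Authorization: 'Bearer sk-xxx' }
getAuthHeaders('google', 'AIza-xxx');      // {} (the key is passed as a query parameter instead)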
@@ -235,6 +238,27 @@ const parseModelsResponse = (providerId, data) => {
        }));
        break;
 
+     case 'deepseek':
+       // DeepSeek format: { data: [{ id, ... }] } - OpenAI compatible
+       models = (data.data || [])
+         .filter(m => m.id && !shouldExcludeModel(m.id))
+         .filter(m => m.id.includes('deepseek'))
+         .map(m => ({
+           id: m.id,
+           name: m.id
+         }));
+       break;
+
+     case 'minimax':
+       // MiniMax format: { data: [{ id, ... }] } or { models: [...] }
+       models = (data.data || data.models || [])
+         .filter(m => (m.id || m.model) && !shouldExcludeModel(m.id || m.model))
+         .map(m => ({
+           id: m.id || m.model,
+           name: m.id || m.model
+         }));
+       break;
+
      case 'xai':
        // xAI format: { data: [{ id, ... }] }
        models = (data.data || [])
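Both new branches normalize provider responses to `{ id, name }` pairs. A worked example with a hypothetical DeepSeek payload (assuming `shouldExcludeModel` passes these ids through):

const data = { data: [{ id: 'deepseek-chat' }, { id: 'deepseek-reasoner' }, { id: 'text-embed-01' }] };
// The 'deepseek' case keeps only ids containing 'deepseek':
// [{ id: 'deepseek-chat', name: 'deepseek-chat' },
//  { id: 'deepseek-reasoner', name: 'deepseek-reasoner' }]
// 'text-embed-01' is dropped by the .includes('deepseek') filter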
@@ -2,13 +2,14 @@
   * @fileoverview Services module exports
   * @module services
   *
-  * Rithmic-only service hub + AI Supervision
+  * Rithmic-only service hub + AI Supervision + Dual Proxy Support
   */
 
  const { RithmicService } = require('./rithmic/index');
  const { HQXServerService } = require('./hqx-server/index');
  const { storage, connections } = require('./session');
  const aiSupervision = require('./ai-supervision');
+ const llmproxy = require('./llmproxy');
 
  module.exports = {
    // Platform Service (Rithmic only)
@@ -23,4 +24,7 @@ module.exports = {
 
    // AI Supervision
    aiSupervision,
+
+   // LLM API Proxy (for API key providers via LiteLLM)
+   llmproxy,
  };
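With the hub re-exporting the module, consumers don't need to know the llmproxy path. A minimal bootstrap sketch (the relative require path is an assumption about the caller's location):

const { llmproxy } = require('./services');

(async () => {
  if (!llmproxy.isInstalled()) {
    await llmproxy.install((msg, pct) => console.log(`[${pct}%] ${msg}`));
  }
  const status = await llmproxy.isRunning();
  if (!status.running) await llmproxy.start();
  console.log(`LLM Proxy ready at ${llmproxy.getBaseUrl()}`); // http://localhost:8318
})();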
@@ -0,0 +1,166 @@
+ /**
+  * LLM API Proxy Service
+  *
+  * Uses LiteLLM (Python) to provide a unified OpenAI-compatible proxy
+  * for 50+ LLM providers via API keys.
+  *
+  * Port: 8318 (different from CLIProxyAPI which uses 8317)
+  *
+  * Supported providers (API Key only):
+  * - MiniMax, DeepSeek, Groq, Mistral, xAI, Perplexity, OpenRouter
+  * - And 50+ more via LiteLLM
+  */
+
+ const { LLMProxyManager } = require('./manager');
+
+ // Singleton instance
+ let proxyManager = null;
+
+ /**
+  * Get or create proxy manager instance
+  * @returns {LLMProxyManager}
+  */
+ const getManager = () => {
+   if (!proxyManager) {
+     proxyManager = new LLMProxyManager();
+   }
+   return proxyManager;
+ };
+
+ /**
+  * Check if LLM Proxy is installed (Python venv + LiteLLM)
+  * @returns {boolean}
+  */
+ const isInstalled = () => {
+   return getManager().isInstalled();
+ };
+
+ /**
+  * Install LLM Proxy (creates Python venv, installs LiteLLM)
+  * @param {Function} onProgress - Progress callback (message, percent)
+  * @returns {Promise<{success: boolean, error?: string}>}
+  */
+ const install = async (onProgress = () => {}) => {
+   return getManager().install(onProgress);
+ };
+
+ /**
+  * Check if LLM Proxy is running
+  * @returns {Promise<{running: boolean, port?: number}>}
+  */
+ const isRunning = async () => {
+   return getManager().isRunning();
+ };
+
+ /**
+  * Start LLM Proxy server
+  * @returns {Promise<{success: boolean, error?: string}>}
+  */
+ const start = async () => {
+   return getManager().start();
+ };
+
+ /**
+  * Stop LLM Proxy server
+  * @returns {Promise<{success: boolean, error?: string}>}
+  */
+ const stop = async () => {
+   return getManager().stop();
+ };
+
+ /**
+  * Set API key for a provider
+  * @param {string} providerId - Provider ID (e.g., 'minimax', 'deepseek')
+  * @param {string} apiKey - API key
+  * @returns {Promise<{success: boolean, error?: string}>}
+  */
+ const setApiKey = async (providerId, apiKey) => {
+   return getManager().setApiKey(providerId, apiKey);
+ };
+
+ /**
+  * Get API key for a provider
+  * @param {string} providerId - Provider ID
+  * @returns {string|null}
+  */
+ const getApiKey = (providerId) => {
+   return getManager().getApiKey(providerId);
+ };
+
+ /**
+  * Test connection to a provider
+  * @param {string} providerId - Provider ID
+  * @param {string} modelId - Model ID to test
+  * @returns {Promise<{success: boolean, latency?: number, error?: string}>}
+  */
+ const testConnection = async (providerId, modelId) => {
+   return getManager().testConnection(providerId, modelId);
+ };
+
+ /**
+  * Make a chat completion request via LLM Proxy
+  * @param {string} providerId - Provider ID
+  * @param {string} modelId - Model ID
+  * @param {Array} messages - Chat messages
+  * @param {Object} options - Additional options (temperature, max_tokens, etc.)
+  * @returns {Promise<{success: boolean, response?: Object, error?: string}>}
+  */
+ const chatCompletion = async (providerId, modelId, messages, options = {}) => {
+   return getManager().chatCompletion(providerId, modelId, messages, options);
+ };
+
+ /**
+  * Get LLM Proxy base URL
+  * @returns {string}
+  */
+ const getBaseUrl = () => {
+   return getManager().getBaseUrl();
+ };
+
+ /**
+  * Get port
+  * @returns {number}
+  */
+ const getPort = () => {
+   return getManager().port;
+ };
+
+ /**
+  * Provider mapping for LiteLLM model prefixes
+  */
+ const PROVIDER_PREFIXES = {
+   minimax: 'minimax/',
+   deepseek: 'deepseek/',
+   groq: 'groq/',
+   mistral: 'mistral/',
+   xai: 'xai/',
+   perplexity: 'perplexity/',
+   openrouter: 'openrouter/',
+   together: 'together_ai/',
+   anyscale: 'anyscale/',
+   fireworks: 'fireworks_ai/',
+   cohere: 'cohere/',
+   ai21: 'ai21/',
+   nlp_cloud: 'nlp_cloud/',
+   replicate: 'replicate/',
+   bedrock: 'bedrock/',
+   sagemaker: 'sagemaker/',
+   vertex: 'vertex_ai/',
+   palm: 'palm/',
+   azure: 'azure/',
+ };
+
+ module.exports = {
+   isInstalled,
+   install,
+   isRunning,
+   start,
+   stop,
+   setApiKey,
+   getApiKey,
+   testConnection,
+   chatCompletion,
+   getBaseUrl,
+   getPort,
+   PROVIDER_PREFIXES,
+ };
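End to end, a call against an API-key provider flows through setApiKey (which restarts the proxy so LiteLLM picks up the new environment variable) and then chatCompletion. A sketch assuming the proxy is installed and started, with a placeholder key and model id:

const llmproxy = require('./llmproxy'); // adjust the path to wherever this module lives

(async () => {
  await llmproxy.setApiKey('deepseek', process.env.DEEPSEEK_API_KEY);
  const ping = await llmproxy.testConnection('deepseek', 'deepseek-chat');
  if (!ping.success) throw new Error(ping.error);
  const result = await llmproxy.chatCompletion(
    'deepseek', 'deepseek-chat',
    [{ role: 'user', content: 'One-line market summary, please.' }],
    { max_tokens: 64 }
  );
  // LiteLLM returns the OpenAI-compatible response shape
  if (result.success) console.log(result.response.choices[0].message.content);
})();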
@@ -0,0 +1,411 @@
+ /**
+  * LLM Proxy Manager
+  *
+  * Manages LiteLLM proxy server installation, configuration and lifecycle.
+  * Uses Python virtual environment for isolation.
+  */
+
+ const { spawn, execSync } = require('child_process');
+ const path = require('path');
+ const fs = require('fs');
+ const os = require('os');
+ const http = require('http');
+
+ // Configuration
+ const LLMPROXY_DIR = path.join(os.homedir(), '.hqx', 'llmproxy');
+ const VENV_DIR = path.join(LLMPROXY_DIR, 'venv');
+ const ENV_FILE = path.join(LLMPROXY_DIR, '.env');
+ const PID_FILE = path.join(LLMPROXY_DIR, 'llmproxy.pid');
+ const LOG_FILE = path.join(LLMPROXY_DIR, 'llmproxy.log');
+ const DEFAULT_PORT = 8318;
+
+ /**
+  * LLM Proxy Manager Class
+  */
+ class LLMProxyManager {
+   constructor() {
+     this.port = DEFAULT_PORT;
+     this.process = null;
+   }
+
+   /**
+    * Get Python executable path in venv
+    */
+   getPythonPath() {
+     const isWindows = process.platform === 'win32';
+     return isWindows
+       ? path.join(VENV_DIR, 'Scripts', 'python.exe')
+       : path.join(VENV_DIR, 'bin', 'python');
+   }
+
+   /**
+    * Get pip executable path in venv
+    */
+   getPipPath() {
+     const isWindows = process.platform === 'win32';
+     return isWindows
+       ? path.join(VENV_DIR, 'Scripts', 'pip.exe')
+       : path.join(VENV_DIR, 'bin', 'pip');
+   }
+
+   /**
+    * Check if LLM Proxy is installed
+    */
+   isInstalled() {
+     try {
+       const pythonPath = this.getPythonPath();
+       if (!fs.existsSync(pythonPath)) return false;
+
+       // Check if litellm is installed
+       execSync(`"${pythonPath}" -c "import litellm"`, { stdio: 'ignore' });
+       return true;
+     } catch {
+       return false;
+     }
+   }
+
+   /**
+    * Install LLM Proxy (Python venv + LiteLLM)
+    */
+   async install(onProgress = () => {}) {
+     try {
+       // Create directory
+       if (!fs.existsSync(LLMPROXY_DIR)) {
+         fs.mkdirSync(LLMPROXY_DIR, { recursive: true });
+       }
+
+       onProgress('Creating Python virtual environment', 10);
+
+       // Check for Python
+       let pythonCmd = 'python3';
+       try {
+         execSync('python3 --version', { stdio: 'ignore' });
+       } catch {
+         try {
+           execSync('python --version', { stdio: 'ignore' });
+           pythonCmd = 'python';
+         } catch {
+           return { success: false, error: 'Python not found. Install Python 3.8+' };
+         }
+       }
+
+       // Create venv
+       if (!fs.existsSync(VENV_DIR)) {
+         execSync(`${pythonCmd} -m venv "${VENV_DIR}"`, { stdio: 'ignore' });
+       }
+
+       onProgress('Installing LiteLLM', 40);
+
+       // Install litellm
+       const pipPath = this.getPipPath();
+       execSync(`"${pipPath}" install --upgrade pip`, { stdio: 'ignore' });
+       execSync(`"${pipPath}" install litellm[proxy]`, { stdio: 'ignore', timeout: 300000 });
+
+       onProgress('Installation complete', 100);
+
+       return { success: true };
+     } catch (error) {
+       return { success: false, error: error.message };
+     }
+   }
+
+   /**
+    * Check if LLM Proxy is running
+    */
+   async isRunning() {
+     try {
+       // Check PID file
+       if (fs.existsSync(PID_FILE)) {
+         const pid = parseInt(fs.readFileSync(PID_FILE, 'utf8').trim());
+         try {
+           process.kill(pid, 0); // Check if process exists
+           // Verify it's responding
+           const health = await this.healthCheck();
+           if (health.success) {
+             return { running: true, port: this.port, pid };
+           }
+         } catch {
+           // PID exists but process doesn't - clean up
+           fs.unlinkSync(PID_FILE);
+         }
+       }
+       return { running: false };
+     } catch {
+       return { running: false };
+     }
+   }
+
+   /**
+    * Health check - ping the proxy
+    */
+   healthCheck() {
+     return new Promise((resolve) => {
+       const req = http.request({
+         hostname: 'localhost',
+         port: this.port,
+         path: '/health',
+         method: 'GET',
+         timeout: 5000
+       }, (res) => {
+         resolve({ success: res.statusCode === 200 });
+       });
+       req.on('error', () => resolve({ success: false }));
+       req.on('timeout', () => { req.destroy(); resolve({ success: false }); });
+       req.end();
+     });
+   }
+
+   /**
+    * Load environment variables from .env file
+    */
+   loadEnvFile() {
+     if (!fs.existsSync(ENV_FILE)) return {};
+     const content = fs.readFileSync(ENV_FILE, 'utf8');
+     const env = {};
+     for (const line of content.split('\n')) {
+       const match = line.match(/^([^=]+)=(.*)$/);
+       if (match) {
+         env[match[1].trim()] = match[2].trim();
+       }
+     }
+     return env;
+   }
+
+   /**
+    * Save environment variable to .env file
+    */
+   saveEnvVar(key, value) {
+     const env = this.loadEnvFile();
+     env[key] = value;
+     const content = Object.entries(env)
+       .map(([k, v]) => `${k}=${v}`)
+       .join('\n');
+     fs.writeFileSync(ENV_FILE, content);
+   }
+
+   /**
+    * Start LLM Proxy server
+    */
+   async start() {
+     try {
+       // Check if already running
+       const status = await this.isRunning();
+       if (status.running) {
+         return { success: true, message: 'Already running' };
+       }
+
+       if (!this.isInstalled()) {
+         return { success: false, error: 'LLM Proxy not installed. Run install() first.' };
+       }
+
+       const pythonPath = this.getPythonPath();
+       const env = { ...process.env, ...this.loadEnvFile() };
+
+       // Start LiteLLM proxy
+       const proc = spawn(pythonPath, [
+         '-m', 'litellm',
+         '--port', String(this.port),
+         '--host', '0.0.0.0'
+       ], {
+         cwd: LLMPROXY_DIR,
+         env,
+         detached: true,
+         stdio: ['ignore', 'pipe', 'pipe']
+       });
+
+       // Write logs
+       const logStream = fs.createWriteStream(LOG_FILE, { flags: 'a' });
+       proc.stdout.pipe(logStream);
+       proc.stderr.pipe(logStream);
+
+       // Save PID
+       fs.writeFileSync(PID_FILE, String(proc.pid));
+       proc.unref();
+
+       // Wait for startup
+       await new Promise(r => setTimeout(r, 3000));
+
+       // Verify running
+       const health = await this.healthCheck();
+       if (!health.success) {
+         return { success: false, error: 'Proxy started but not responding' };
+       }
+
+       return { success: true, port: this.port, pid: proc.pid };
+     } catch (error) {
+       return { success: false, error: error.message };
+     }
+   }
+
+   /**
+    * Stop LLM Proxy server
+    */
+   async stop() {
+     try {
+       if (fs.existsSync(PID_FILE)) {
+         const pid = parseInt(fs.readFileSync(PID_FILE, 'utf8').trim());
+         try {
+           process.kill(pid, 'SIGTERM');
+           await new Promise(r => setTimeout(r, 1000));
+           try { process.kill(pid, 'SIGKILL'); } catch {}
+         } catch {}
+         fs.unlinkSync(PID_FILE);
+       }
+       return { success: true };
+     } catch (error) {
+       return { success: false, error: error.message };
+     }
+   }
+
+   /**
+    * Set API key for a provider
+    */
+   async setApiKey(providerId, apiKey) {
+     try {
+       const envKey = this.getEnvKeyName(providerId);
+       this.saveEnvVar(envKey, apiKey);
+
+       // Restart proxy if running to pick up new key
+       const status = await this.isRunning();
+       if (status.running) {
+         await this.stop();
+         await this.start();
+       }
+
+       return { success: true };
+     } catch (error) {
+       return { success: false, error: error.message };
+     }
+   }
+
+   /**
+    * Get API key for a provider
+    */
+   getApiKey(providerId) {
+     const envKey = this.getEnvKeyName(providerId);
+     const env = this.loadEnvFile();
+     return env[envKey] || null;
+   }
+
+   /**
+    * Get environment variable name for provider API key
+    */
+   getEnvKeyName(providerId) {
+     const mapping = {
+       minimax: 'MINIMAX_API_KEY',
+       deepseek: 'DEEPSEEK_API_KEY',
+       groq: 'GROQ_API_KEY',
+       mistral: 'MISTRAL_API_KEY',
+       xai: 'XAI_API_KEY',
+       perplexity: 'PERPLEXITYAI_API_KEY',
+       openrouter: 'OPENROUTER_API_KEY',
+       together: 'TOGETHERAI_API_KEY',
+       fireworks: 'FIREWORKS_AI_API_KEY',
+       cohere: 'COHERE_API_KEY',
+       ai21: 'AI21_API_KEY',
+       replicate: 'REPLICATE_API_KEY',
+       anthropic: 'ANTHROPIC_API_KEY',
+       openai: 'OPENAI_API_KEY',
+       google: 'GEMINI_API_KEY',
+     };
+     return mapping[providerId] || `${providerId.toUpperCase()}_API_KEY`;
+   }
+
+   /**
+    * Test connection to a provider
+    */
+   async testConnection(providerId, modelId) {
+     try {
+       const start = Date.now();
+       const result = await this.chatCompletion(providerId, modelId, [
+         { role: 'user', content: 'Say "OK" in one word.' }
+       ], { max_tokens: 5 });
+
+       if (result.success) {
+         return { success: true, latency: Date.now() - start };
+       }
+       return { success: false, error: result.error };
+     } catch (error) {
+       return { success: false, error: error.message };
+     }
+   }
+
+   /**
+    * Make chat completion request via LLM Proxy
+    */
+   async chatCompletion(providerId, modelId, messages, options = {}) {
+     return new Promise((resolve) => {
+       const modelPrefix = this.getModelPrefix(providerId);
+       const fullModelId = modelId.includes('/') ? modelId : `${modelPrefix}${modelId}`;
+
+       const body = JSON.stringify({
+         model: fullModelId,
+         messages,
+         ...options
+       });
+
+       const req = http.request({
+         hostname: 'localhost',
+         port: this.port,
+         path: '/v1/chat/completions',
+         method: 'POST',
+         headers: {
+           'Content-Type': 'application/json',
+           'Content-Length': Buffer.byteLength(body)
+         },
+         timeout: 60000
+       }, (res) => {
+         let data = '';
+         res.on('data', chunk => data += chunk);
+         res.on('end', () => {
+           try {
+             const parsed = JSON.parse(data);
+             if (res.statusCode >= 200 && res.statusCode < 300) {
+               resolve({ success: true, response: parsed });
+             } else {
+               resolve({ success: false, error: parsed.error?.message || `HTTP ${res.statusCode}` });
+             }
+           } catch {
+             resolve({ success: false, error: 'Invalid response' });
+           }
+         });
+       });
+
+       req.on('error', (err) => resolve({ success: false, error: err.message }));
+       req.on('timeout', () => { req.destroy(); resolve({ success: false, error: 'Timeout' }); });
+       req.write(body);
+       req.end();
+     });
+   }
+
+   /**
+    * Get LiteLLM model prefix for provider
+    */
+   getModelPrefix(providerId) {
+     const prefixes = {
+       minimax: 'minimax/',
+       deepseek: 'deepseek/',
+       groq: 'groq/',
+       mistral: 'mistral/',
+       xai: 'xai/',
+       perplexity: 'perplexity/',
+       openrouter: 'openrouter/',
+       together: 'together_ai/',
+       fireworks: 'fireworks_ai/',
+       cohere: 'cohere/',
+       anthropic: 'anthropic/',
+       openai: 'openai/',
+       google: 'gemini/',
+     };
+     return prefixes[providerId] || `${providerId}/`;
+   }
+
+   /**
+    * Get base URL for LLM Proxy
+    */
+   getBaseUrl() {
+     return `http://localhost:${this.port}`;
+   }
+ }
+
+ module.exports = { LLMProxyManager };
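For reference, the .env file that loadEnvFile/saveEnvVar round-trip is plain KEY=value lines under ~/.hqx/llmproxy/. A small sketch of the key handling (key values are placeholders; assumes install() has already created the directory):

const { LLMProxyManager } = require('./manager');
const mgr = new LLMProxyManager();

// 'perplexity' maps to LiteLLM's PERPLEXITYAI_API_KEY, not PERPLEXITY_API_KEY
console.log(mgr.getEnvKeyName('perplexity')); // 'PERPLEXITYAI_API_KEY'
mgr.saveEnvVar(mgr.getEnvKeyName('perplexity'), 'pplx-xxx'); // writes PERPLEXITYAI_API_KEY=pplx-xxx
console.log(mgr.getApiKey('perplexity')); // 'pplx-xxx'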