hedgequantx 2.6.161 → 2.6.162

This diff shows the changes between publicly available package versions as they appear in their public registry. It is provided for informational purposes only.
Files changed (42)
  1. package/package.json +1 -1
  2. package/src/menus/ai-agent-connect.js +181 -0
  3. package/src/menus/ai-agent-models.js +219 -0
  4. package/src/menus/ai-agent-oauth.js +292 -0
  5. package/src/menus/ai-agent-ui.js +141 -0
  6. package/src/menus/ai-agent.js +88 -1489
  7. package/src/pages/algo/copy-engine.js +449 -0
  8. package/src/pages/algo/copy-trading.js +11 -543
  9. package/src/pages/algo/smart-logs-data.js +218 -0
  10. package/src/pages/algo/smart-logs.js +9 -214
  11. package/src/pages/algo/ui-constants.js +144 -0
  12. package/src/pages/algo/ui-summary.js +184 -0
  13. package/src/pages/algo/ui.js +42 -526
  14. package/src/pages/stats-calculations.js +191 -0
  15. package/src/pages/stats-ui.js +381 -0
  16. package/src/pages/stats.js +14 -507
  17. package/src/services/ai/client-analysis.js +194 -0
  18. package/src/services/ai/client-models.js +333 -0
  19. package/src/services/ai/client.js +6 -489
  20. package/src/services/ai/index.js +2 -257
  21. package/src/services/ai/proxy-install.js +249 -0
  22. package/src/services/ai/proxy-manager.js +29 -411
  23. package/src/services/ai/proxy-remote.js +161 -0
  24. package/src/services/ai/supervisor-optimize.js +215 -0
  25. package/src/services/ai/supervisor-sync.js +178 -0
  26. package/src/services/ai/supervisor.js +50 -515
  27. package/src/services/ai/validation.js +250 -0
  28. package/src/services/hqx-server-events.js +110 -0
  29. package/src/services/hqx-server-handlers.js +217 -0
  30. package/src/services/hqx-server-latency.js +136 -0
  31. package/src/services/hqx-server.js +51 -403
  32. package/src/services/position-constants.js +28 -0
  33. package/src/services/position-manager.js +105 -554
  34. package/src/services/position-momentum.js +206 -0
  35. package/src/services/projectx/accounts.js +142 -0
  36. package/src/services/projectx/index.js +40 -289
  37. package/src/services/projectx/trading.js +180 -0
  38. package/src/services/rithmic/handlers.js +2 -208
  39. package/src/services/rithmic/index.js +32 -542
  40. package/src/services/rithmic/latency-tracker.js +182 -0
  41. package/src/services/rithmic/specs.js +146 -0
  42. package/src/services/rithmic/trade-history.js +254 -0
package/src/services/ai/client-analysis.js (new file)
@@ -0,0 +1,194 @@
+ /**
+  * @fileoverview AI Client - Trading Analysis Functions
+  *
+  * Functions to analyze trading data with AI
+  * Uses callAI from client.js core
+  */
+
+ /**
+  * Analyze trading data with AI
+  * @param {Function} callAI - The callAI function from client.js
+  * @param {Object} agent - AI agent
+  * @param {Object} data - Trading data from APIs
+  * @returns {Promise<Object|null>} Analysis result or null
+  */
+ const analyzeTrading = async (callAI, agent, data) => {
+   if (!agent || !data) return null;
+
+   const systemPrompt = `You are a professional trading analyst for prop firm futures trading.
+ Analyze the provided real-time trading data and provide actionable insights.
+ Be concise. Focus on risk management and optimization.
+ Respond in JSON format with: { "action": "HOLD|REDUCE_SIZE|PAUSE|CONTINUE", "confidence": 0-100, "reason": "brief reason" }`;
+
+   const prompt = `Current trading session data:
+ - Account Balance: ${data.account?.balance ?? 'N/A'}
+ - Today P&L: ${data.account?.profitAndLoss ?? 'N/A'}
+ - Open Positions: ${data.positions?.length ?? 0}
+ - Open Orders: ${data.orders?.length ?? 0}
+ - Today Trades: ${data.trades?.length ?? 0}
+
+ ${data.positions?.length > 0 ? `Positions: ${JSON.stringify(data.positions.map(p => ({
+   symbol: p.symbol || p.contractId,
+   qty: p.quantity,
+   pnl: p.profitAndLoss
+ })))}` : ''}
+
+ Analyze and provide recommendation.`;
+
+   try {
+     const response = await callAI(agent, prompt, systemPrompt);
+     if (!response) return null;
+
+     const jsonMatch = response.match(/\{[\s\S]*\}/);
+     if (jsonMatch) {
+       return JSON.parse(jsonMatch[0]);
+     }
+
+     return null;
+   } catch (error) {
+     return null;
+   }
+ };
+
+ /**
+  * Analyze strategy performance and suggest optimizations
+  * @param {Function} callAI - The callAI function from client.js
+  * @param {Object} agent - AI agent
+  * @param {Object} performanceData - Strategy performance data
+  * @returns {Promise<Object|null>} Optimization suggestions
+  */
+ const analyzePerformance = async (callAI, agent, performanceData) => {
+   if (!agent || !performanceData) return null;
+
+   const systemPrompt = `You are an AI supervisor for HQX Ultra Scalping, a professional prop firm futures trading strategy.
+
+ The strategy uses advanced mathematical models:
+ - Order flow analysis (delta, cumulative delta, absorption)
+ - Market microstructure (bid/ask imbalance, volume profile)
+ - Statistical edge detection (z-score, standard deviation bands)
+ - Dynamic risk management (Kelly criterion, volatility-adjusted sizing)
+
+ Your job is to analyze performance data and suggest parameter optimizations.
+ Be precise and actionable. Focus on improving win rate, reducing drawdown, and optimizing risk/reward.
+
+ Respond ONLY in valid JSON format:
+ {
+   "assessment": "brief performance assessment",
+   "winRateAnalysis": "analysis of win/loss patterns",
+   "riskAnalysis": "analysis of risk management",
+   "optimizations": [
+     { "param": "parameter_name", "current": "current_value", "suggested": "new_value", "reason": "why" }
+   ],
+   "marketCondition": "trending|ranging|volatile|calm",
+   "confidence": 0-100
+ }`;
+
+   const prompt = `STRATEGY PERFORMANCE DATA - ANALYZE AND OPTIMIZE
+
+ Session Stats:
+ - Trades: ${performanceData.trades || 0}
+ - Wins: ${performanceData.wins || 0}
+ - Losses: ${performanceData.losses || 0}
+ - Win Rate: ${performanceData.winRate ? (performanceData.winRate * 100).toFixed(1) + '%' : 'N/A'}
+ - Total P&L: $${performanceData.pnl?.toFixed(2) || '0.00'}
+ - Avg Win: $${performanceData.avgWin?.toFixed(2) || 'N/A'}
+ - Avg Loss: $${performanceData.avgLoss?.toFixed(2) || 'N/A'}
+ - Largest Win: $${performanceData.largestWin?.toFixed(2) || 'N/A'}
+ - Largest Loss: $${performanceData.largestLoss?.toFixed(2) || 'N/A'}
+ - Max Drawdown: $${performanceData.maxDrawdown?.toFixed(2) || 'N/A'}
+ - Profit Factor: ${performanceData.profitFactor?.toFixed(2) || 'N/A'}
+
+ Current Parameters:
+ - Position Size: ${performanceData.positionSize || 'N/A'} contracts
+ - Daily Target: $${performanceData.dailyTarget || 'N/A'}
+ - Max Risk: $${performanceData.maxRisk || 'N/A'}
+ - Symbol: ${performanceData.symbol || 'N/A'}
+
+ Recent Trades:
+ ${performanceData.recentTrades?.map(t =>
+   `- ${t.side} ${t.qty}x @ ${t.price} → P&L: $${t.pnl?.toFixed(2) || 'N/A'}`
+ ).join('\n') || 'No recent trades'}
+
+ Market Context:
+ - Volatility: ${performanceData.volatility || 'N/A'}
+ - Trend: ${performanceData.trend || 'N/A'}
+ - Session: ${performanceData.session || 'N/A'}
+
+ Analyze and suggest optimizations to improve performance.`;
+
+   try {
+     const response = await callAI(agent, prompt, systemPrompt);
+     if (!response) return null;
+
+     const jsonMatch = response.match(/\{[\s\S]*\}/);
+     if (jsonMatch) {
+       return JSON.parse(jsonMatch[0]);
+     }
+
+     return null;
+   } catch (error) {
+     return null;
+   }
+ };
+
+ /**
+  * Get real-time trading advice based on current market conditions
+  * @param {Function} callAI - The callAI function from client.js
+  * @param {Object} agent - AI agent
+  * @param {Object} marketData - Current market data
+  * @returns {Promise<Object|null>} Trading advice
+  */
+ const getMarketAdvice = async (callAI, agent, marketData) => {
+   if (!agent || !marketData) return null;
+
+   const systemPrompt = `You are an AI supervisor for HQX Ultra Scalping futures strategy.
+ Analyze real-time market data and provide actionable advice.
+ Be concise and precise. The strategy will use your recommendations.
+
+ Respond ONLY in valid JSON:
+ {
+   "action": "AGGRESSIVE|NORMAL|CAUTIOUS|PAUSE",
+   "sizeMultiplier": 0.5-1.5,
+   "reason": "brief reason",
+   "confidence": 0-100
+ }`;
+
+   const prompt = `REAL-TIME MARKET ANALYSIS
+
+ Current Price: ${marketData.price || 'N/A'}
+ Bid: ${marketData.bid || 'N/A'} | Ask: ${marketData.ask || 'N/A'}
+ Spread: ${marketData.spread || 'N/A'}
+ Volume: ${marketData.volume || 'N/A'}
+ Delta: ${marketData.delta || 'N/A'}
+ Volatility: ${marketData.volatility || 'N/A'}
+
+ Recent Price Action:
+ - High: ${marketData.high || 'N/A'}
+ - Low: ${marketData.low || 'N/A'}
+ - Range: ${marketData.range || 'N/A'}
+
+ Current Position: ${marketData.position || 'FLAT'}
+ Session P&L: $${marketData.pnl?.toFixed(2) || '0.00'}
+
+ What should the strategy do?`;
+
+   try {
+     const response = await callAI(agent, prompt, systemPrompt);
+     if (!response) return null;
+
+     const jsonMatch = response.match(/\{[\s\S]*\}/);
+     if (jsonMatch) {
+       return JSON.parse(jsonMatch[0]);
+     }
+
+     return null;
+   } catch (error) {
+     return null;
+   }
+ };
+
+ module.exports = {
+   analyzeTrading,
+   analyzePerformance,
+   getMarketAdvice,
+ };
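
For reference, a minimal sketch of how these analysis helpers could be wired from the caller's side. The callAI stub, the agent shape, and the require path below are assumptions for illustration; only the exported function signatures come from the diff above.

// Hypothetical usage sketch: the callAI stub and agent shape are assumed,
// not taken from the package. The real callAI lives in client.js.
const { analyzeTrading } = require('./client-analysis');

// Stand-in for callAI(agent, prompt, systemPrompt): resolves to raw model text.
const callAI = async (agent, prompt, systemPrompt) =>
  '{"action":"HOLD","confidence":72,"reason":"flat book, no edge detected"}';

const agent = { provider: 'anthropic', model: 'example-model' }; // assumed shape

(async () => {
  const analysis = await analyzeTrading(callAI, agent, {
    account: { balance: 50000, profitAndLoss: 125.5 },
    positions: [],
    orders: [],
    trades: [],
  });
  // analyzeTrading returns the first JSON object found in the reply, or null.
  console.log(analysis);
})();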
package/src/services/ai/client-models.js (new file)
@@ -0,0 +1,333 @@
+ /**
+  * @fileoverview AI Client - Model Fetching Functions
+  *
+  * Functions to fetch available models from various AI providers
+  * Data comes from provider APIs - NO hardcoded fallbacks
+  */
+
+ const https = require('https');
+ const http = require('http');
+
+ /**
+  * Make HTTP request
+  * @param {string} url - Full URL
+  * @param {Object} options - Request options
+  * @returns {Promise<Object>} Response data
+  */
+ const makeRequest = (url, options) => {
+   return new Promise((resolve, reject) => {
+     const parsedUrl = new URL(url);
+     const protocol = parsedUrl.protocol === 'https:' ? https : http;
+
+     const req = protocol.request(url, {
+       method: options.method || 'POST',
+       headers: options.headers || {},
+       timeout: options.timeout || 30000
+     }, (res) => {
+       let data = '';
+       res.on('data', chunk => data += chunk);
+       res.on('end', () => {
+         try {
+           const json = JSON.parse(data);
+           if (res.statusCode >= 200 && res.statusCode < 300) {
+             resolve(json);
+           } else {
+             reject(new Error(json.error?.message || `HTTP ${res.statusCode}`));
+           }
+         } catch (e) {
+           reject(new Error(`Invalid JSON response: ${data.substring(0, 100)}`));
+         }
+       });
+     });
+
+     req.on('error', reject);
+     req.on('timeout', () => reject(new Error('Request timeout')));
+
+     if (options.body) {
+       req.write(JSON.stringify(options.body));
+     }
+     req.end();
+   });
+ };
+
+ /**
+  * Fetch available models from Anthropic API (API Key auth)
+  * @param {string} apiKey - API key
+  * @returns {Promise<Array|null>} Array of model IDs or null on error
+  */
+ const fetchAnthropicModels = async (apiKey) => {
+   if (!apiKey) return null;
+
+   const url = 'https://api.anthropic.com/v1/models';
+
+   const headers = {
+     'x-api-key': apiKey,
+     'anthropic-version': '2023-06-01'
+   };
+
+   try {
+     const response = await makeRequest(url, { method: 'GET', headers, timeout: 10000 });
+     if (response.data && Array.isArray(response.data)) {
+       return response.data.map(m => m.id).filter(Boolean);
+     }
+     return null;
+   } catch (error) {
+     return null;
+   }
+ };
+
+ /**
+  * Fetch available models from Anthropic API (OAuth auth)
+  * @param {string} accessToken - OAuth access token
+  * @returns {Promise<Object>} { models: Array, error: string|null }
+  */
+ const fetchAnthropicModelsOAuth = async (accessToken) => {
+   if (!accessToken) return { models: null, error: 'No access token provided' };
+
+   const modelsUrl = 'https://api.anthropic.com/v1/models';
+
+   const headers = {
+     'Authorization': `Bearer ${accessToken}`,
+     'anthropic-version': '2023-06-01',
+     'anthropic-beta': 'oauth-2025-04-20'
+   };
+
+   try {
+     const response = await makeRequest(modelsUrl, { method: 'GET', headers, timeout: 15000 });
+     if (response.data && Array.isArray(response.data)) {
+       const models = response.data.map(m => m.id).filter(Boolean);
+       if (models.length > 0) return { models, error: null };
+     }
+     return { models: null, error: 'API returned empty or invalid response' };
+   } catch (error) {
+     return { models: null, error: error.message };
+   }
+ };
+
+ /**
+  * Fetch available models from Google Gemini API
+  * @param {string} apiKey - API key
+  * @returns {Promise<Array|null>} Array of model IDs or null on error
+  */
+ const fetchGeminiModels = async (apiKey) => {
+   if (!apiKey) return null;
+
+   const url = `https://generativelanguage.googleapis.com/v1/models?key=${apiKey}`;
+
+   try {
+     const response = await makeRequest(url, { method: 'GET', timeout: 10000 });
+     if (response.models && Array.isArray(response.models)) {
+       return response.models
+         .filter(m => m.supportedGenerationMethods?.includes('generateContent'))
+         .map(m => m.name.replace('models/', ''))
+         .filter(Boolean);
+     }
+     return null;
+   } catch (error) {
+     return null;
+   }
+ };
+
+ /**
+  * Fetch available models from OpenAI-compatible API
+  * @param {string} endpoint - API endpoint base URL
+  * @param {string} apiKey - API key or OAuth token
+  * @returns {Promise<Array|null>} Array of model IDs from API, null if unavailable
+  */
+ const fetchOpenAIModels = async (endpoint, apiKey) => {
+   if (!endpoint) return null;
+
+   const url = `${endpoint}/models`;
+
+   const headers = {
+     'Content-Type': 'application/json'
+   };
+
+   if (apiKey) {
+     headers['Authorization'] = `Bearer ${apiKey}`;
+   }
+
+   try {
+     const response = await makeRequest(url, { method: 'GET', headers, timeout: 10000 });
+     if (response.data && Array.isArray(response.data)) {
+       const chatModels = response.data
+         .map(m => m.id)
+         .filter(id => id && (
+           id.includes('gpt') ||
+           id.includes('o1') ||
+           id.includes('o3') ||
+           id.includes('claude') ||
+           id.includes('gemini')
+         ))
+         .filter(id =>
+           !id.includes('embedding') &&
+           !id.includes('whisper') &&
+           !id.includes('tts') &&
+           !id.includes('dall-e')
+         );
+
+       return chatModels.length > 0 ? chatModels : null;
+     }
+     return null;
+   } catch (error) {
+     return null;
+   }
+ };
+
+ /**
+  * Fetch available models for OAuth-authenticated providers
+  * @param {string} providerId - Provider ID
+  * @param {string} accessToken - OAuth access token
+  * @returns {Promise<Array|null>} Array of model IDs from API, null if unavailable
+  */
+ const fetchModelsWithOAuth = async (providerId, accessToken) => {
+   if (!accessToken) return null;
+
+   try {
+     switch (providerId) {
+       case 'anthropic':
+         return await fetchAnthropicModelsOAuth(accessToken);
+
+       case 'openai': {
+         const openaiModels = await fetchOpenAIModels('https://api.openai.com/v1', accessToken);
+         if (openaiModels && openaiModels.length > 0) {
+           return openaiModels;
+         }
+
+         // Try ChatGPT backend API (for Plus/Pro plans)
+         try {
+           const chatgptUrl = 'https://chatgpt.com/backend-api/models';
+           const chatgptHeaders = {
+             'Authorization': `Bearer ${accessToken}`,
+             'Content-Type': 'application/json'
+           };
+           const chatgptResponse = await makeRequest(chatgptUrl, {
+             method: 'GET',
+             headers: chatgptHeaders,
+             timeout: 10000
+           });
+           if (chatgptResponse.models && Array.isArray(chatgptResponse.models)) {
+             return chatgptResponse.models
+               .map(m => m.slug || m.id || m.name)
+               .filter(Boolean);
+           }
+         } catch (e) {
+           if (process.env.HQX_DEBUG) {
+             console.error('[DEBUG] ChatGPT backend error:', e.message);
+           }
+         }
+
+         return null;
+       }
+
+       case 'gemini': {
+         try {
+           const geminiUrl = 'https://generativelanguage.googleapis.com/v1/models';
+           const geminiHeaders = {
+             'Authorization': `Bearer ${accessToken}`
+           };
+           const geminiResponse = await makeRequest(geminiUrl, {
+             method: 'GET',
+             headers: geminiHeaders,
+             timeout: 15000
+           });
+           if (geminiResponse.models && Array.isArray(geminiResponse.models)) {
+             const models = geminiResponse.models
+               .filter(m => m.supportedGenerationMethods?.includes('generateContent'))
+               .map(m => m.name.replace('models/', ''))
+               .filter(Boolean);
+             if (models.length > 0) return models;
+           }
+         } catch (e) {
+           if (process.env.HQX_DEBUG) {
+             console.error('[DEBUG] Gemini models API error:', e.message);
+           }
+         }
+
+         return null;
+       }
+
+       case 'qwen': {
+         try {
+           const qwenUrl = 'https://dashscope.aliyuncs.com/api/v1/models';
+           const qwenHeaders = {
+             'Authorization': `Bearer ${accessToken}`,
+             'Content-Type': 'application/json'
+           };
+           const qwenResponse = await makeRequest(qwenUrl, {
+             method: 'GET',
+             headers: qwenHeaders,
+             timeout: 10000
+           });
+           if (qwenResponse.data && Array.isArray(qwenResponse.data)) {
+             return qwenResponse.data
+               .map(m => m.id || m.model_id)
+               .filter(Boolean);
+           }
+         } catch (e) {
+           // Qwen API may not support model listing
+         }
+         return null;
+       }
+
+       case 'iflow': {
+         try {
+           const iflowUrl = 'https://apis.iflow.cn/v1/models';
+           const iflowHeaders = {
+             'Authorization': `Bearer ${accessToken}`,
+             'Content-Type': 'application/json'
+           };
+           const iflowResponse = await makeRequest(iflowUrl, {
+             method: 'GET',
+             headers: iflowHeaders,
+             timeout: 10000
+           });
+           if (iflowResponse.data && Array.isArray(iflowResponse.data)) {
+             return iflowResponse.data
+               .map(m => m.id)
+               .filter(Boolean);
+           }
+         } catch (e) {
+           // iFlow API may not support model listing
+         }
+         return null;
+       }
+
+       default:
+         return null;
+     }
+   } catch (error) {
+     return null;
+   }
+ };
+
+ /**
+  * Fetch models via local CLIProxyAPI proxy
+  * @returns {Promise<Array|null>} Array of model IDs or null
+  */
+ const fetchModelsViaProxy = async (proxyPort = 8317) => {
+   const url = `http://127.0.0.1:${proxyPort}/v1/models`;
+
+   const headers = {
+     'Authorization': 'Bearer hqx-local-key'
+   };
+
+   try {
+     const response = await makeRequest(url, { method: 'GET', headers, timeout: 10000 });
+     if (response.data && Array.isArray(response.data)) {
+       return response.data.map(m => m.id || m).filter(Boolean);
+     }
+     return null;
+   } catch (error) {
+     return null;
+   }
+ };
+
+ module.exports = {
+   fetchAnthropicModels,
+   fetchAnthropicModelsOAuth,
+   fetchGeminiModels,
+   fetchOpenAIModels,
+   fetchModelsWithOAuth,
+   fetchModelsViaProxy,
+ };
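
A minimal sketch of how the model fetchers might be combined. The fallback order, the ANTHROPIC_API_KEY environment variable, and the require path are assumptions for illustration; the function signatures and the proxy defaults (port 8317, the hqx-local-key bearer token) come from the diff above.

// Hypothetical usage sketch: fallback order and env var name are assumptions.
const { fetchModelsViaProxy, fetchAnthropicModels } = require('./client-models');

(async () => {
  // Both helpers resolve to an array of model IDs or null and never throw,
  // so a simple || chain works as a fallback: try the local CLIProxyAPI proxy
  // first, then a direct Anthropic lookup with an API key.
  const models =
    (await fetchModelsViaProxy(8317)) ||
    (await fetchAnthropicModels(process.env.ANTHROPIC_API_KEY)) ||
    [];
  console.log(`models available: ${models.length}`);
})();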