hedgequantx 2.6.152 → 2.6.153

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "hedgequantx",
3
- "version": "2.6.152",
3
+ "version": "2.6.153",
4
4
  "description": "HedgeQuantX - Prop Futures Trading CLI",
5
5
  "main": "src/app.js",
6
6
  "bin": {
@@ -19,7 +19,7 @@ const readline = require('readline');
19
19
 
20
20
  const { connections } = require('../../services');
21
21
  const { AlgoUI, renderSessionSummary, renderMultiSymbolSummary } = require('./ui');
22
- const { displayBanner } = require('../../ui');
22
+ const { displayBanner, drawBoxHeaderContinue, drawBoxFooter, drawBoxRow, getLogoWidth, centerText } = require('../../ui');
23
23
  const { prompts } = require('../../utils');
24
24
  const { checkMarketHours } = require('../../services/projectx/market');
25
25
  const { FAST_SCALPING } = require('../../config/settings');
@@ -304,11 +304,13 @@ const customStrategyMenu = async (service) => {
304
304
  }
305
305
 
306
306
  const agentName = agents[0].name || agents[0].provider;
307
- console.log(chalk.cyan('\n ╔════════════════════════════════════════════════════════════╗'));
308
- console.log(chalk.cyan(' ║') + chalk.yellow.bold(' CUSTOM STRATEGY - AI GENERATED ') + chalk.cyan('║'));
309
- console.log(chalk.cyan(' ╠════════════════════════════════════════════════════════════╣'));
310
- console.log(chalk.cyan(' ║') + chalk.gray(` Agent: ${agentName.padEnd(51)}`) + chalk.cyan('║'));
311
- console.log(chalk.cyan(' ╚════════════════════════════════════════════════════════════╝\n'));
307
+ const boxWidth = Math.max(getLogoWidth(), 98);
308
+ const innerWidth = boxWidth - 2;
309
+
310
+ // Header aligned with main banner
311
+ drawBoxHeaderContinue('CUSTOM STRATEGY - AI GENERATED');
312
+ drawBoxRow(`Agent: ${agentName}`, innerWidth);
313
+ drawBoxFooter();
312
314
 
313
315
  // Step 1: Get strategy description from user
314
316
  console.log(chalk.yellow(' Describe your trading strategy in natural language:'));
@@ -366,12 +368,11 @@ const customStrategyMenu = async (service) => {
366
368
  console.log(chalk.gray(` Saved to: ${filepath}\n`));
367
369
 
368
370
  // Step 4: Show strategy summary and confirm
369
- console.log(chalk.cyan(' ╔════════════════════════════════════════════════════════════╗'));
370
- console.log(chalk.cyan(' ║') + chalk.green.bold(' STRATEGY READY ') + chalk.cyan('║'));
371
- console.log(chalk.cyan(' ╠════════════════════════════════════════════════════════════╣'));
372
- console.log(chalk.cyan(' ║') + chalk.white(` Name: ${genResult.name.substring(0, 51).padEnd(51)}`) + chalk.cyan('║'));
373
- console.log(chalk.cyan(' ║') + chalk.gray(` File: ${filename.padEnd(51)}`) + chalk.cyan('║'));
374
- console.log(chalk.cyan(' ╚════════════════════════════════════════════════════════════╝\n'));
371
+ console.log('');
372
+ drawBoxHeaderContinue('STRATEGY READY');
373
+ drawBoxRow(`Name: ${genResult.name}`, innerWidth);
374
+ drawBoxRow(`File: ${filename}`, innerWidth);
375
+ drawBoxFooter();
375
376
 
376
377
  const confirm = await prompts.textInput(chalk.cyan(' Continue with this strategy? (Y/n): '));
377
378
  if (confirm.toLowerCase() === 'n') {
@@ -393,8 +394,8 @@ const customStrategyMenu = async (service) => {
393
394
  * Same as one-account but uses the custom strategy class
394
395
  */
395
396
  async function executeWithCustomStrategy(service, StrategyClass, strategyName) {
396
- // Import one-account's configuration prompts
397
- const { getLogoWidth, drawBoxHeaderContinue, drawBoxFooter, displayBanner } = require('../../ui');
397
+ const boxWidth = Math.max(getLogoWidth(), 98);
398
+ const innerWidth = boxWidth - 2;
398
399
 
399
400
  // Get accounts
400
401
  const accountsResult = await service.getTradingAccounts();
@@ -61,9 +61,10 @@ const makeRequest = (url, options) => {
61
61
  * @param {Object} agent - Agent configuration
62
62
  * @param {string} prompt - User prompt
63
63
  * @param {string} systemPrompt - System prompt
64
+ * @param {Object} options - Optional settings { maxTokens, temperature, timeout }
64
65
  * @returns {Promise<string|null>} Response text or null on error
65
66
  */
66
- const callOpenAICompatible = async (agent, prompt, systemPrompt) => {
67
+ const callOpenAICompatible = async (agent, prompt, systemPrompt, options = {}) => {
67
68
  const provider = getProvider(agent.providerId);
68
69
  if (!provider) return null;
69
70
 
@@ -97,12 +98,12 @@ const callOpenAICompatible = async (agent, prompt, systemPrompt) => {
97
98
  { role: 'system', content: systemPrompt },
98
99
  { role: 'user', content: prompt }
99
100
  ],
100
- temperature: 0.3,
101
- max_tokens: 500
101
+ temperature: options.temperature || 0.3
102
102
  };
103
103
 
104
104
  try {
105
- const response = await makeRequest(url, { headers, body, timeout: 30000 });
105
+ const timeout = options.timeout || 30000;
106
+ const response = await makeRequest(url, { headers, body, timeout });
106
107
  return response.choices?.[0]?.message?.content || null;
107
108
  } catch (error) {
108
109
  return null;
@@ -138,9 +139,10 @@ const getValidOAuthToken = async (credentials) => {
138
139
  * @param {Object} agent - Agent configuration
139
140
  * @param {string} prompt - User prompt
140
141
  * @param {string} systemPrompt - System prompt
142
+ * @param {Object} options - Optional settings { maxTokens, temperature, timeout }
141
143
  * @returns {Promise<string|null>} Response text or null on error
142
144
  */
143
- const callAnthropic = async (agent, prompt, systemPrompt) => {
145
+ const callAnthropic = async (agent, prompt, systemPrompt, options = {}) => {
144
146
  const provider = getProvider('anthropic');
145
147
  if (!provider) return null;
146
148
 
@@ -171,7 +173,7 @@ const callAnthropic = async (agent, prompt, systemPrompt) => {
171
173
 
172
174
  const body = {
173
175
  model,
174
- max_tokens: 500,
176
+ max_tokens: 8192,
175
177
  system: systemPrompt,
176
178
  messages: [
177
179
  { role: 'user', content: prompt }
@@ -179,7 +181,8 @@ const callAnthropic = async (agent, prompt, systemPrompt) => {
179
181
  };
180
182
 
181
183
  try {
182
- const response = await makeRequest(url, { headers, body, timeout: 30000 });
184
+ const timeout = options.timeout || 30000;
185
+ const response = await makeRequest(url, { headers, body, timeout });
183
186
  return response.content?.[0]?.text || null;
184
187
  } catch (error) {
185
188
  return null;
@@ -193,7 +196,7 @@ const callAnthropic = async (agent, prompt, systemPrompt) => {
193
196
  * @param {string} systemPrompt - System prompt
194
197
  * @returns {Promise<string|null>} Response text or null on error
195
198
  */
196
- const callGemini = async (agent, prompt, systemPrompt) => {
199
+ const callGemini = async (agent, prompt, systemPrompt, options = {}) => {
197
200
  const provider = getProvider('gemini');
198
201
  if (!provider) return null;
199
202
 
@@ -213,13 +216,13 @@ const callGemini = async (agent, prompt, systemPrompt) => {
213
216
  { role: 'user', parts: [{ text: `${systemPrompt}\n\n${prompt}` }] }
214
217
  ],
215
218
  generationConfig: {
216
- temperature: 0.3,
217
- maxOutputTokens: 500
219
+ temperature: 0.3
218
220
  }
219
221
  };
220
222
 
221
223
  try {
222
- const response = await makeRequest(url, { headers, body, timeout: 30000 });
224
+ const timeout = options.timeout || 60000;
225
+ const response = await makeRequest(url, { headers, body, timeout });
223
226
  return response.candidates?.[0]?.content?.parts?.[0]?.text || null;
224
227
  } catch (error) {
225
228
  return null;
@@ -234,7 +237,7 @@ const callGemini = async (agent, prompt, systemPrompt) => {
234
237
  * @param {string} systemPrompt - System prompt
235
238
  * @returns {Promise<string|null>} Response text or null on error
236
239
  */
237
- const callViaProxy = async (agent, prompt, systemPrompt) => {
240
+ const callViaProxy = async (agent, prompt, systemPrompt, options = {}) => {
238
241
  const proxyPort = agent.credentials?.proxyPort || 8317;
239
242
  const model = agent.model;
240
243
 
@@ -253,12 +256,12 @@ const callViaProxy = async (agent, prompt, systemPrompt) => {
253
256
  { role: 'system', content: systemPrompt },
254
257
  { role: 'user', content: prompt }
255
258
  ],
256
- temperature: 0.3,
257
- max_tokens: 500
259
+ temperature: 0.3
258
260
  };
259
261
 
260
262
  try {
261
- const response = await makeRequest(url, { headers, body, timeout: 30000 });
263
+ const timeout = options.timeout || 60000;
264
+ const response = await makeRequest(url, { headers, body, timeout });
262
265
  return response.choices?.[0]?.message?.content || null;
263
266
  } catch (error) {
264
267
  return null;
@@ -270,22 +273,26 @@ const callViaProxy = async (agent, prompt, systemPrompt) => {
270
273
  * @param {Object} agent - Agent with providerId and credentials
271
274
  * @param {string} prompt - User prompt
272
275
  * @param {string} systemPrompt - System prompt
276
+ * @param {Object} options - Optional settings { maxTokens, temperature, timeout }
273
277
  * @returns {Promise<string|null>} AI response or null on error
274
278
  */
275
- const callAI = async (agent, prompt, systemPrompt = '') => {
279
+ const callAI = async (agent, prompt, systemPrompt = '', options = {}) => {
276
280
  if (!agent || !agent.providerId) return null;
277
281
 
282
+ // Default timeout 60s for code generation
283
+ const opts = { timeout: 60000, ...options };
284
+
278
285
  // Check if using proxy mode (subscription accounts)
279
286
  if (agent.credentials?.useProxy) {
280
- return callViaProxy(agent, prompt, systemPrompt);
287
+ return callViaProxy(agent, prompt, systemPrompt, opts);
281
288
  }
282
289
 
283
290
  switch (agent.providerId) {
284
291
  case 'anthropic':
285
- return callAnthropic(agent, prompt, systemPrompt);
292
+ return callAnthropic(agent, prompt, systemPrompt, opts);
286
293
 
287
294
  case 'gemini':
288
- return callGemini(agent, prompt, systemPrompt);
295
+ return callGemini(agent, prompt, systemPrompt, opts);
289
296
 
290
297
  // All OpenAI-compatible APIs
291
298
  case 'openai':
@@ -304,7 +311,7 @@ const callAI = async (agent, prompt, systemPrompt = '') => {
304
311
  case 'ollama':
305
312
  case 'lmstudio':
306
313
  case 'custom':
307
- return callOpenAICompatible(agent, prompt, systemPrompt);
314
+ return callOpenAICompatible(agent, prompt, systemPrompt, opts);
308
315
 
309
316
  default:
310
317
  return null;