hedgequantx 2.6.151 → 2.6.153
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/src/pages/algo/custom-strategy.js +35 -29
- package/src/services/ai/client.js +27 -20
package/package.json
CHANGED

package/src/pages/algo/custom-strategy.js
CHANGED

@@ -19,6 +19,7 @@ const readline = require('readline');
 
 const { connections } = require('../../services');
 const { AlgoUI, renderSessionSummary, renderMultiSymbolSummary } = require('./ui');
+const { displayBanner, drawBoxHeaderContinue, drawBoxFooter, drawBoxRow, getLogoWidth, centerText } = require('../../ui');
 const { prompts } = require('../../utils');
 const { checkMarketHours } = require('../../services/projectx/market');
 const { FAST_SCALPING } = require('../../config/settings');
@@ -28,6 +29,7 @@ const { algoLogger } = require('./logger');
 const { recoveryMath } = require('../../services/strategy/recovery-math');
 const aiService = require('../../services/ai');
 const { launchMultiSymbolRithmic } = require('./one-account');
+const aiClient = require('../../services/ai/client');
 
 // Strategy template that the AI will fill
 const STRATEGY_TEMPLATE = `/**
@@ -127,16 +129,21 @@ function getStrategyDir() {
  * Generate strategy code using AI agent
  */
 async function generateStrategyCode(description, agentName) {
-  const
+  const agents = aiService.getAgents();
+  const agent = agents.find(a => a.name === agentName || a.provider === agentName);
   if (!agent) {
     return { success: false, error: 'No AI agent available' };
   }
 
-  const
+  const systemPrompt = `You are a trading strategy code generator for HQX-CLI.
+Generate trading strategies based on user descriptions.
+You must output ONLY valid JSON with the strategy components, no markdown, no explanation.`;
+
+  const prompt = `Generate a trading strategy based on this description:
 
 "${description}"
 
-
+Fill in these template sections:
 
 1. STRATEGY_NAME: A short name for the strategy (string)
 2. STRATEGY_DESCRIPTION: One line description (string)
@@ -153,7 +160,7 @@ Available in processTick:
 - this.tickSize, this.tickValue (contract specs)
 - this.tickCount (number of ticks processed)
 
-Output format (JSON):
+Output format (JSON only):
 {
   "STRATEGY_NAME": "...",
   "STRATEGY_DESCRIPTION": "...",
@@ -161,29 +168,26 @@ Output format (JSON):
   "STRATEGY_LOGIC": "...",
   "STRATEGY_GET_STATE": "...",
   "STRATEGY_RESET": "..."
-}
-
-IMPORTANT: Output ONLY the JSON, no markdown, no explanation.`;
+}`;
 
   try {
-
-
-    ]);
+    // Use aiClient.callAI which handles all providers correctly
+    const response = await aiClient.callAI(agent, prompt, systemPrompt);
 
-    if (!response
-      return { success: false, error:
+    if (!response) {
+      return { success: false, error: 'AI request failed - no response' };
     }
 
     // Parse JSON response
     let strategyParts;
     try {
       // Extract JSON from response (might have markdown)
-
-      const jsonMatch = jsonStr.match(/\{[\s\S]*\}/);
+      const jsonMatch = response.match(/\{[\s\S]*\}/);
       if (jsonMatch) {
-
+        strategyParts = JSON.parse(jsonMatch[0]);
+      } else {
+        return { success: false, error: 'AI response did not contain valid JSON' };
       }
-      strategyParts = JSON.parse(jsonStr);
     } catch (parseError) {
       return { success: false, error: `Failed to parse AI response: ${parseError.message}` };
     }
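
The two hunks above move generateStrategyCode onto the shared aiClient.callAI helper and tighten the JSON handling (explicit failure when no JSON object is found). A minimal sketch of the resulting flow, using only names visible in this diff; the wrapper function and its parameters are hypothetical, and the try/catch that the real code keeps around parsing is condensed away:

  const aiService = require('../../services/ai');
  const aiClient = require('../../services/ai/client');

  // Hypothetical condensed wrapper; prompt/systemPrompt are built by the real function
  async function generateStrategyParts(agentName, prompt, systemPrompt) {
    const agents = aiService.getAgents();
    const agent = agents.find(a => a.name === agentName || a.provider === agentName);
    if (!agent) return { success: false, error: 'No AI agent available' };

    // Single call path for every provider; resolves to response text or null
    const response = await aiClient.callAI(agent, prompt, systemPrompt);
    if (!response) return { success: false, error: 'AI request failed - no response' };

    // The model is told to return JSON only, but a stray markdown fence is tolerated:
    // the first {...} block is extracted before parsing
    const jsonMatch = response.match(/\{[\s\S]*\}/);
    if (!jsonMatch) return { success: false, error: 'AI response did not contain valid JSON' };
    return { success: true, parts: JSON.parse(jsonMatch[0]) };
  }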
@@ -288,6 +292,7 @@ async function validateStrategyCode(code, filepath) {
  */
 const customStrategyMenu = async (service) => {
   console.clear();
+  displayBanner();
 
   // Check if AI agent is connected
   const agents = aiService.getAgents();
@@ -299,11 +304,13 @@ const customStrategyMenu = async (service) => {
   }
 
   const agentName = agents[0].name || agents[0].provider;
-
-
-
-
-
+  const boxWidth = Math.max(getLogoWidth(), 98);
+  const innerWidth = boxWidth - 2;
+
+  // Header aligned with main banner
+  drawBoxHeaderContinue('CUSTOM STRATEGY - AI GENERATED');
+  drawBoxRow(`Agent: ${agentName}`, innerWidth);
+  drawBoxFooter();
 
   // Step 1: Get strategy description from user
   console.log(chalk.yellow(' Describe your trading strategy in natural language:'));
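
The hand-drawn console.log box is replaced with the shared UI helpers imported in the first hunk. Judging only from the call sites in this diff (the ui module itself is not shown), the pattern is: pin the width to at least 98 columns or the logo width, reserve two columns for the border, then header, rows, footer. A sketch of that call pattern with an illustrative agent name:

  const { drawBoxHeaderContinue, drawBoxRow, drawBoxFooter, getLogoWidth } = require('../../ui');

  const agentName = 'claude';                     // illustrative only
  const boxWidth = Math.max(getLogoWidth(), 98);  // never narrower than 98 columns
  const innerWidth = boxWidth - 2;                // minus the two border characters

  drawBoxHeaderContinue('CUSTOM STRATEGY - AI GENERATED');
  drawBoxRow(`Agent: ${agentName}`, innerWidth);
  drawBoxFooter();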
@@ -361,12 +368,11 @@ const customStrategyMenu = async (service) => {
   console.log(chalk.gray(` Saved to: ${filepath}\n`));
 
   // Step 4: Show strategy summary and confirm
-  console.log(
-
-
-
-
-  console.log(chalk.cyan(' ╚════════════════════════════════════════════════════════════╝\n'));
+  console.log('');
+  drawBoxHeaderContinue('STRATEGY READY');
+  drawBoxRow(`Name: ${genResult.name}`, innerWidth);
+  drawBoxRow(`File: ${filename}`, innerWidth);
+  drawBoxFooter();
 
   const confirm = await prompts.textInput(chalk.cyan(' Continue with this strategy? (Y/n): '));
   if (confirm.toLowerCase() === 'n') {
@@ -388,8 +394,8 @@ const customStrategyMenu = async (service) => {
  * Same as one-account but uses the custom strategy class
  */
 async function executeWithCustomStrategy(service, StrategyClass, strategyName) {
-
-  const
+  const boxWidth = Math.max(getLogoWidth(), 98);
+  const innerWidth = boxWidth - 2;
 
   // Get accounts
   const accountsResult = await service.getTradingAccounts();

package/src/services/ai/client.js
CHANGED

@@ -61,9 +61,10 @@ const makeRequest = (url, options) => {
  * @param {Object} agent - Agent configuration
  * @param {string} prompt - User prompt
  * @param {string} systemPrompt - System prompt
+ * @param {Object} options - Optional settings { maxTokens, temperature, timeout }
  * @returns {Promise<string|null>} Response text or null on error
  */
-const callOpenAICompatible = async (agent, prompt, systemPrompt) => {
+const callOpenAICompatible = async (agent, prompt, systemPrompt, options = {}) => {
   const provider = getProvider(agent.providerId);
   if (!provider) return null;
 
@@ -97,12 +98,12 @@ const callOpenAICompatible = async (agent, prompt, systemPrompt) => {
       { role: 'system', content: systemPrompt },
       { role: 'user', content: prompt }
     ],
-    temperature: 0.3
-    max_tokens: 500
+    temperature: options.temperature || 0.3
   };
 
   try {
-    const
+    const timeout = options.timeout || 30000;
+    const response = await makeRequest(url, { headers, body, timeout });
     return response.choices?.[0]?.message?.content || null;
   } catch (error) {
     return null;
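
This hunk drops the fixed max_tokens: 500 cap and lets callers tune temperature and timeout, with || fallbacks of 0.3 and 30000 ms. One property of || worth keeping in mind here (a small illustration, not part of the package):

  const temperatureFor = (options = {}) => options.temperature || 0.3;

  temperatureFor({});                     // 0.3  (no value supplied)
  temperatureFor({ temperature: 0.7 });   // 0.7
  temperatureFor({ temperature: 0 });     // 0.3, because 0 is falsy; options.temperature ?? 0.3 would preserve an explicit 0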
@@ -138,9 +139,10 @@ const getValidOAuthToken = async (credentials) => {
  * @param {Object} agent - Agent configuration
  * @param {string} prompt - User prompt
  * @param {string} systemPrompt - System prompt
+ * @param {Object} options - Optional settings { maxTokens, temperature, timeout }
  * @returns {Promise<string|null>} Response text or null on error
  */
-const callAnthropic = async (agent, prompt, systemPrompt) => {
+const callAnthropic = async (agent, prompt, systemPrompt, options = {}) => {
   const provider = getProvider('anthropic');
   if (!provider) return null;
 
@@ -171,7 +173,7 @@ const callAnthropic = async (agent, prompt, systemPrompt) => {
 
   const body = {
     model,
-    max_tokens:
+    max_tokens: 8192,
     system: systemPrompt,
     messages: [
       { role: 'user', content: prompt }
@@ -179,7 +181,8 @@ const callAnthropic = async (agent, prompt, systemPrompt) => {
   };
 
   try {
-    const
+    const timeout = options.timeout || 30000;
+    const response = await makeRequest(url, { headers, body, timeout });
     return response.content?.[0]?.text || null;
   } catch (error) {
     return null;
@@ -193,7 +196,7 @@ const callAnthropic = async (agent, prompt, systemPrompt) => {
  * @param {string} systemPrompt - System prompt
  * @returns {Promise<string|null>} Response text or null on error
  */
-const callGemini = async (agent, prompt, systemPrompt) => {
+const callGemini = async (agent, prompt, systemPrompt, options = {}) => {
   const provider = getProvider('gemini');
   if (!provider) return null;
 
@@ -213,13 +216,13 @@ const callGemini = async (agent, prompt, systemPrompt) => {
       { role: 'user', parts: [{ text: `${systemPrompt}\n\n${prompt}` }] }
     ],
     generationConfig: {
-      temperature: 0.3
-      maxOutputTokens: 500
+      temperature: 0.3
     }
   };
 
   try {
-    const
+    const timeout = options.timeout || 60000;
+    const response = await makeRequest(url, { headers, body, timeout });
     return response.candidates?.[0]?.content?.parts?.[0]?.text || null;
   } catch (error) {
     return null;
@@ -234,7 +237,7 @@ const callGemini = async (agent, prompt, systemPrompt) => {
  * @param {string} systemPrompt - System prompt
  * @returns {Promise<string|null>} Response text or null on error
  */
-const callViaProxy = async (agent, prompt, systemPrompt) => {
+const callViaProxy = async (agent, prompt, systemPrompt, options = {}) => {
   const proxyPort = agent.credentials?.proxyPort || 8317;
   const model = agent.model;
 
@@ -253,12 +256,12 @@ const callViaProxy = async (agent, prompt, systemPrompt) => {
       { role: 'system', content: systemPrompt },
       { role: 'user', content: prompt }
     ],
-    temperature: 0.3
-    max_tokens: 500
+    temperature: 0.3
   };
 
   try {
-    const
+    const timeout = options.timeout || 60000;
+    const response = await makeRequest(url, { headers, body, timeout });
     return response.choices?.[0]?.message?.content || null;
   } catch (error) {
     return null;
@@ -270,22 +273,26 @@ const callViaProxy = async (agent, prompt, systemPrompt) => {
  * @param {Object} agent - Agent with providerId and credentials
  * @param {string} prompt - User prompt
  * @param {string} systemPrompt - System prompt
+ * @param {Object} options - Optional settings { maxTokens, temperature, timeout }
  * @returns {Promise<string|null>} AI response or null on error
  */
-const callAI = async (agent, prompt, systemPrompt = '') => {
+const callAI = async (agent, prompt, systemPrompt = '', options = {}) => {
   if (!agent || !agent.providerId) return null;
 
+  // Default timeout 60s for code generation
+  const opts = { timeout: 60000, ...options };
+
   // Check if using proxy mode (subscription accounts)
   if (agent.credentials?.useProxy) {
-    return callViaProxy(agent, prompt, systemPrompt);
+    return callViaProxy(agent, prompt, systemPrompt, opts);
   }
 
   switch (agent.providerId) {
     case 'anthropic':
-      return callAnthropic(agent, prompt, systemPrompt);
+      return callAnthropic(agent, prompt, systemPrompt, opts);
 
     case 'gemini':
-      return callGemini(agent, prompt, systemPrompt);
+      return callGemini(agent, prompt, systemPrompt, opts);
 
     // All OpenAI-compatible APIs
     case 'openai':
@@ -304,7 +311,7 @@ const callAI = async (agent, prompt, systemPrompt = '') => {
     case 'ollama':
     case 'lmstudio':
     case 'custom':
-      return callOpenAICompatible(agent, prompt, systemPrompt);
+      return callOpenAICompatible(agent, prompt, systemPrompt, opts);
 
     default:
       return null;
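
With options threaded through callAI and into every provider function, a caller such as generateStrategyCode can now lengthen the request timeout or adjust temperature without touching provider code; callAI applies a 60 s default before dispatching, and the provider functions keep their own 30 s / 60 s fallbacks when called directly. A hedged usage sketch (parameter values are illustrative):

  const aiClient = require('../../services/ai/client');

  async function example(agent, prompt, systemPrompt) {
    // Defaults: callAI injects timeout: 60000 before dispatching to the provider
    const quick = await aiClient.callAI(agent, prompt, systemPrompt);

    // Caller-tuned: more time and a lower temperature for long code generation
    const long = await aiClient.callAI(agent, prompt, systemPrompt, { timeout: 120000, temperature: 0.2 });

    return long ?? quick; // callAI resolves to the response text, or null on any error
  }

Note that, as of this diff, only the OpenAI-compatible path reads options.temperature and nothing reads options.maxTokens yet (Anthropic is pinned to max_tokens: 8192), so the new @param documentation runs slightly ahead of the implementation.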