bulltrackers-module 1.0.105 → 1.0.107
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.MD +222 -222
- package/functions/appscript-api/helpers/errors.js +19 -19
- package/functions/appscript-api/index.js +58 -58
- package/functions/computation-system/helpers/orchestration_helpers.js +667 -113
- package/functions/computation-system/utils/data_loader.js +191 -191
- package/functions/computation-system/utils/utils.js +149 -254
- package/functions/core/utils/firestore_utils.js +433 -433
- package/functions/core/utils/pubsub_utils.js +53 -53
- package/functions/dispatcher/helpers/dispatch_helpers.js +47 -47
- package/functions/dispatcher/index.js +52 -52
- package/functions/etoro-price-fetcher/helpers/handler_helpers.js +124 -124
- package/functions/fetch-insights/helpers/handler_helpers.js +91 -91
- package/functions/generic-api/helpers/api_helpers.js +379 -379
- package/functions/generic-api/index.js +150 -150
- package/functions/invalid-speculator-handler/helpers/handler_helpers.js +75 -75
- package/functions/orchestrator/helpers/discovery_helpers.js +226 -226
- package/functions/orchestrator/helpers/update_helpers.js +92 -92
- package/functions/orchestrator/index.js +147 -147
- package/functions/price-backfill/helpers/handler_helpers.js +116 -123
- package/functions/social-orchestrator/helpers/orchestrator_helpers.js +61 -61
- package/functions/social-task-handler/helpers/handler_helpers.js +288 -288
- package/functions/task-engine/handler_creator.js +78 -78
- package/functions/task-engine/helpers/discover_helpers.js +125 -125
- package/functions/task-engine/helpers/update_helpers.js +118 -118
- package/functions/task-engine/helpers/verify_helpers.js +162 -162
- package/functions/task-engine/utils/firestore_batch_manager.js +258 -258
- package/index.js +105 -113
- package/package.json +45 -45
- package/functions/computation-system/computation_dependencies.json +0 -120
- package/functions/computation-system/helpers/worker_helpers.js +0 -340
- package/functions/computation-system/utils/computation_state_manager.js +0 -178
- package/functions/computation-system/utils/dependency_graph.js +0 -191
- package/functions/speculator-cleanup-orchestrator/helpers/cleanup_helpers.js +0 -160
|
@@ -1,79 +1,79 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* @fileoverview Main pipe for the Task Engine.
|
|
3
|
-
* REFACTORED: This file is now 'handler_creator.js' in name only.
|
|
4
|
-
* It exports the main 'handleRequest' pipe function.
|
|
5
|
-
* OPTIMIZED: Removed 'batchManager.flushBatches()' from 'finally' block
|
|
6
|
-
* to enable true asynchronous batching.
|
|
7
|
-
*/
|
|
8
|
-
|
|
9
|
-
const { handleDiscover } = require('./helpers/discover_helpers');
|
|
10
|
-
const { handleVerify } = require('./helpers/verify_helpers');
|
|
11
|
-
const { handleUpdate } = require('./helpers/update_helpers');
|
|
12
|
-
|
|
13
|
-
/**
|
|
14
|
-
* Main pipe: pipe.taskEngine.handleRequest
|
|
15
|
-
* This is the Pub/Sub triggered handler.
|
|
16
|
-
* @param {object} message - The Pub/Sub message.
|
|
17
|
-
* @param {object} context - The message context.
|
|
18
|
-
* @param {object} config - The task-engine-specific config.
|
|
19
|
-
* @param {object} dependencies - Contains all clients: db, pubsub, logger, headerManager, proxyManager, batchManager.
|
|
20
|
-
*/
|
|
21
|
-
async function handleRequest(message, context, config, dependencies) {
|
|
22
|
-
// --- OPTIMIZATION: Destructure headerManager here ---
|
|
23
|
-
const { logger, batchManager, headerManager } = dependencies;
|
|
24
|
-
|
|
25
|
-
if (!message || !message.data) {
|
|
26
|
-
logger.log('ERROR', '[TaskEngine Module] Received invalid message structure.', { message });
|
|
27
|
-
return;
|
|
28
|
-
}
|
|
29
|
-
|
|
30
|
-
let task;
|
|
31
|
-
try {
|
|
32
|
-
task = JSON.parse(Buffer.from(message.data, 'base64').toString('utf-8'));
|
|
33
|
-
} catch (e) {
|
|
34
|
-
logger.log('ERROR', '[TaskEngine Module] Failed to parse Pub/Sub message data.', { error: e.message, data: message.data });
|
|
35
|
-
return;
|
|
36
|
-
}
|
|
37
|
-
|
|
38
|
-
const taskId = `${task.type || 'unknown'}-${task.userType || 'unknown'}-${task.userId || task.cids?.[0] || 'batch'}-${Date.now()}`;
|
|
39
|
-
logger.log('INFO', `[TaskEngine/${taskId}] Received.`);
|
|
40
|
-
|
|
41
|
-
try {
|
|
42
|
-
await new Promise(resolve => setTimeout(resolve, Math.random() * 500 + 250));
|
|
43
|
-
|
|
44
|
-
// --- 3. Use the directly required functions ---
|
|
45
|
-
// Route to the correct sub-pipe
|
|
46
|
-
const handlerFunction = {
|
|
47
|
-
discover: handleDiscover,
|
|
48
|
-
verify: handleVerify,
|
|
49
|
-
update: handleUpdate
|
|
50
|
-
}[task.type];
|
|
51
|
-
|
|
52
|
-
if (handlerFunction) {
|
|
53
|
-
// Pass the dependencies object to the sub-pipe
|
|
54
|
-
await handlerFunction(task, taskId, dependencies, config);
|
|
55
|
-
logger.log('SUCCESS', `[TaskEngine/${taskId}] Done.`);
|
|
56
|
-
} else {
|
|
57
|
-
logger.log('ERROR', `[TaskEngine/${taskId}] Unknown task type received: ${task.type}`);
|
|
58
|
-
}
|
|
59
|
-
} catch (error) {
|
|
60
|
-
logger.log('ERROR', `[TaskEngine/${taskId}] Failed.`, { errorMessage: error.message, errorStack: error.stack });
|
|
61
|
-
|
|
62
|
-
} finally {
|
|
63
|
-
try {
|
|
64
|
-
// --- OPTIMIZATION START ---
|
|
65
|
-
// The FirestoreBatchManager will flush itself on its own timer.
|
|
66
|
-
// We no longer call `await batchManager.flushBatches()` here.
|
|
67
|
-
// We *only* need to flush the header performance updates at the end of the task.
|
|
68
|
-
await headerManager.flushPerformanceUpdates();
|
|
69
|
-
logger.log('INFO', `[TaskEngine/${taskId}] Final header performance flush complete.`);
|
|
70
|
-
// --- OPTIMIZATION END ---
|
|
71
|
-
} catch (flushError) {
|
|
72
|
-
logger.log('ERROR', `[TaskEngine/${taskId}] Error during final flush attempt.`, { error: flushError.message });
|
|
73
|
-
}
|
|
74
|
-
}
|
|
75
|
-
}
|
|
76
|
-
|
|
77
|
-
module.exports = {
|
|
78
|
-
handleRequest,
|
|
1
|
+
/**
|
|
2
|
+
* @fileoverview Main pipe for the Task Engine.
|
|
3
|
+
* REFACTORED: This file is now 'handler_creator.js' in name only.
|
|
4
|
+
* It exports the main 'handleRequest' pipe function.
|
|
5
|
+
* OPTIMIZED: Removed 'batchManager.flushBatches()' from 'finally' block
|
|
6
|
+
* to enable true asynchronous batching.
|
|
7
|
+
*/
|
|
8
|
+
|
|
9
|
+
const { handleDiscover } = require('./helpers/discover_helpers');
|
|
10
|
+
const { handleVerify } = require('./helpers/verify_helpers');
|
|
11
|
+
const { handleUpdate } = require('./helpers/update_helpers');
|
|
12
|
+
|
|
13
|
+
/**
|
|
14
|
+
* Main pipe: pipe.taskEngine.handleRequest
|
|
15
|
+
* This is the Pub/Sub triggered handler.
|
|
16
|
+
* @param {object} message - The Pub/Sub message.
|
|
17
|
+
* @param {object} context - The message context.
|
|
18
|
+
* @param {object} config - The task-engine-specific config.
|
|
19
|
+
* @param {object} dependencies - Contains all clients: db, pubsub, logger, headerManager, proxyManager, batchManager.
|
|
20
|
+
*/
|
|
21
|
+
async function handleRequest(message, context, config, dependencies) {
|
|
22
|
+
// --- OPTIMIZATION: Destructure headerManager here ---
|
|
23
|
+
const { logger, batchManager, headerManager } = dependencies;
|
|
24
|
+
|
|
25
|
+
if (!message || !message.data) {
|
|
26
|
+
logger.log('ERROR', '[TaskEngine Module] Received invalid message structure.', { message });
|
|
27
|
+
return;
|
|
28
|
+
}
|
|
29
|
+
|
|
30
|
+
let task;
|
|
31
|
+
try {
|
|
32
|
+
task = JSON.parse(Buffer.from(message.data, 'base64').toString('utf-8'));
|
|
33
|
+
} catch (e) {
|
|
34
|
+
logger.log('ERROR', '[TaskEngine Module] Failed to parse Pub/Sub message data.', { error: e.message, data: message.data });
|
|
35
|
+
return;
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
const taskId = `${task.type || 'unknown'}-${task.userType || 'unknown'}-${task.userId || task.cids?.[0] || 'batch'}-${Date.now()}`;
|
|
39
|
+
logger.log('INFO', `[TaskEngine/${taskId}] Received.`);
|
|
40
|
+
|
|
41
|
+
try {
|
|
42
|
+
await new Promise(resolve => setTimeout(resolve, Math.random() * 500 + 250));
|
|
43
|
+
|
|
44
|
+
// --- 3. Use the directly required functions ---
|
|
45
|
+
// Route to the correct sub-pipe
|
|
46
|
+
const handlerFunction = {
|
|
47
|
+
discover: handleDiscover,
|
|
48
|
+
verify: handleVerify,
|
|
49
|
+
update: handleUpdate
|
|
50
|
+
}[task.type];
|
|
51
|
+
|
|
52
|
+
if (handlerFunction) {
|
|
53
|
+
// Pass the dependencies object to the sub-pipe
|
|
54
|
+
await handlerFunction(task, taskId, dependencies, config);
|
|
55
|
+
logger.log('SUCCESS', `[TaskEngine/${taskId}] Done.`);
|
|
56
|
+
} else {
|
|
57
|
+
logger.log('ERROR', `[TaskEngine/${taskId}] Unknown task type received: ${task.type}`);
|
|
58
|
+
}
|
|
59
|
+
} catch (error) {
|
|
60
|
+
logger.log('ERROR', `[TaskEngine/${taskId}] Failed.`, { errorMessage: error.message, errorStack: error.stack });
|
|
61
|
+
|
|
62
|
+
} finally {
|
|
63
|
+
try {
|
|
64
|
+
// --- OPTIMIZATION START ---
|
|
65
|
+
// The FirestoreBatchManager will flush itself on its own timer.
|
|
66
|
+
// We no longer call `await batchManager.flushBatches()` here.
|
|
67
|
+
// We *only* need to flush the header performance updates at the end of the task.
|
|
68
|
+
await headerManager.flushPerformanceUpdates();
|
|
69
|
+
logger.log('INFO', `[TaskEngine/${taskId}] Final header performance flush complete.`);
|
|
70
|
+
// --- OPTIMIZATION END ---
|
|
71
|
+
} catch (flushError) {
|
|
72
|
+
logger.log('ERROR', `[TaskEngine/${taskId}] Error during final flush attempt.`, { error: flushError.message });
|
|
73
|
+
}
|
|
74
|
+
}
|
|
75
|
+
}
|
|
76
|
+
|
|
77
|
+
module.exports = {
|
|
78
|
+
handleRequest,
|
|
79
79
|
};
|
|
/**
 * @fileoverview Sub-pipe: pipe.taskEngine.handleDiscover
 * REFACTORED: Now stateless and receives dependencies.
 */

/**
 * Sub-pipe: pipe.taskEngine.handleDiscover
 *
 * Fetches rankings for a block of CIDs, keeps only recently-active users,
 * applies a heuristic pre-filter for speculators, reports invalid CIDs, and
 * chains the survivors into a 'verify' task.
 *
 * @param {object} task The Pub/Sub task payload ({ cids, blockId, instrument, userType }).
 * @param {string} taskId A unique ID for logging.
 * @param {object} dependencies - Contains db, pubsub, logger, headerManager, proxyManager, batchManager.
 * @param {object} config The configuration object.
 */
async function handleDiscover(task, taskId, dependencies, config) {
  const { logger, headerManager, proxyManager, pubsub, batchManager } = dependencies;
  const { cids, blockId, instrument, userType } = task;

  const rankingsUrl = `${config.ETORO_API_RANKINGS_URL}?Period=LastTwoYears`;
  const selectedHeader = await headerManager.selectHeader();
  if (!selectedHeader) throw new Error("Could not select a header.");

  let wasSuccess = false;
  try {
    if (userType === 'speculator') {
      // Record these CIDs as processed; the batch manager flushes the set later.
      batchManager.addProcessedSpeculatorCids(cids);
      logger.log('INFO', `[DISCOVER] Added ${cids.length} speculator CIDs to the in-memory set to be flushed.`);
    }

    // Rankings lookup goes through the proxy with the selected rotating header.
    const response = await proxyManager.fetch(rankingsUrl, {
      method: 'POST',
      headers: { ...selectedHeader.header, 'Content-Type': 'application/json' },
      body: JSON.stringify(cids),
    });
    if (!response.ok) {
      throw new Error(`API status ${response.status}`);
    }
    wasSuccess = true;

    const publicUsers = await response.json();
    if (!Array.isArray(publicUsers)) return;

    const activityCutoff = new Date();
    activityCutoff.setMonth(activityCutoff.getMonth() - 1);

    // Active in the last month, with non-trivial gain/exposure/risk stats.
    const preliminaryActiveUsers = publicUsers.filter(({ Value }) =>
      new Date(Value.LastActivity) > activityCutoff &&
      Value.DailyGain !== 0 &&
      Value.Exposure !== 0 &&
      Value.RiskScore !== 0
    );

    let finalActiveUsers = [];
    const invalidCidsToLog = [];

    if (userType === 'speculator') {
      const respondedCids = new Set(publicUsers.map((u) => u.CID));

      // Requested CIDs that the API did not echo back belong to private users.
      if (respondedCids.size > 0 && respondedCids.size < cids.length) {
        invalidCidsToLog.push(...cids.filter((cid) => !respondedCids.has(cid)));
      }

      // Users that responded but failed the activity filter are inactive.
      const activeCids = new Set(preliminaryActiveUsers.map((u) => u.CID));
      invalidCidsToLog.push(
        ...publicUsers.filter((u) => !activeCids.has(u.CID)).map((u) => u.CID)
      );

      logger.log('INFO', `[DISCOVER] Applying new speculator pre-filter to ${preliminaryActiveUsers.length} active users.`);
      const nonSpeculatorCids = [];

      for (const user of preliminaryActiveUsers) {
        const stats = user.Value;
        const leveragedPct = (stats.MediumLeveragePct || 0) + (stats.HighLeveragePct || 0);

        // Heuristic: heavy trading, wide instrument spread, high leverage, or
        // deep weekly drawdown marks a likely speculator.
        const looksSpeculative =
          (stats.Trades || 0) > 500 ||
          (stats.TotalTradedInstruments || 0) > 50 ||
          leveragedPct > 50 ||
          (stats.WeeklyDD || 0) < -25;

        if (looksSpeculative) {
          finalActiveUsers.push(user);
        } else {
          nonSpeculatorCids.push(user.CID);
        }
      }

      invalidCidsToLog.push(...nonSpeculatorCids);
      logger.log('INFO', `[DISCOVER] Pre-filter complete. ${finalActiveUsers.length} users passed. ${nonSpeculatorCids.length} users failed heuristic.`);

      if (invalidCidsToLog.length > 0) {
        // Report private / inactive / heuristic-failed CIDs for cleanup.
        await pubsub.topic(config.PUBSUB_TOPIC_INVALID_SPECULATOR_LOG)
          .publishMessage({ json: { invalidCids: invalidCidsToLog } });
        logger.log('INFO', `[DISCOVER] Reported ${invalidCidsToLog.length} invalid (private, inactive, or failed heuristic) speculator IDs.`);
      }

    } else { // 'normal' users
      finalActiveUsers = preliminaryActiveUsers;
    }

    if (finalActiveUsers.length > 0) {
      const verificationTask = {
        type: 'verify',
        users: finalActiveUsers.map((u) => ({ cid: u.CID, isBronze: u.Value.IsBronze })),
        blockId,
        instrument,
        userType
      };
      // Chain the survivors into the 'verify' stage.
      await pubsub.topic(config.PUBSUB_TOPIC_USER_FETCH)
        .publishMessage({ json: verificationTask });
      logger.log('INFO', `[DISCOVER] Verification message published was : ${JSON.stringify(verificationTask)} `);
      logger.log('INFO', `[DISCOVER] Chaining to 'verify' task for ${finalActiveUsers.length} active users.`);
    }

  } finally {
    if (selectedHeader) headerManager.updatePerformance(selectedHeader.id, wasSuccess);
  }
}

module.exports = { handleDiscover };
/**
 * @fileoverview Sub-pipe: pipe.taskEngine.handleUpdate
 * REFACTORED: Now stateless and receives dependencies.
 * OPTIMIZED: Removed immediate batch commit for speculator timestamp fix.
 */
const { FieldValue } = require('@google-cloud/firestore');

/**
 * Computes the numeric million-block id shared by the orchestrator and the
 * block-count documents (e.g. userId 19123456 -> "19000000").
 * FIX: hoisted out of handleUpdate (the same expression was duplicated three
 * times) and parseInt now always receives an explicit radix.
 * @param {string|number} userId - Numeric user id (decimal digits).
 * @returns {string} The block id as a string.
 */
function numericBlockId(userId) {
  return String(Math.floor(parseInt(userId, 10) / 1000000) * 1000000);
}

/**
 * Sub-pipe: pipe.taskEngine.handleUpdate
 *
 * Fetches a user's portfolio (or per-instrument positions for speculators),
 * handles the private-user case by decrementing block counters, and otherwise
 * queues the portfolio + timestamp writes on the batch manager.
 *
 * @param {object} task The Pub/Sub task payload ({ userId, instrumentId, userType }).
 * @param {string} taskId A unique ID for logging.
 * @param {object} dependencies - Contains db, pubsub, logger, headerManager, proxyManager, batchManager.
 * @param {object} config The configuration object.
 */
async function handleUpdate(task, taskId, dependencies, config) {
  const { logger, headerManager, proxyManager, db, batchManager } = dependencies;
  const { userId, instrumentId, userType } = task;

  const selectedHeader = await headerManager.selectHeader();
  if (!selectedHeader) throw new Error("Could not select a header.");

  let wasSuccess = false;
  try {
    // Speculators are fetched per-instrument; normal users get the full portfolio.
    const url = userType === 'speculator'
      ? `${config.ETORO_API_POSITIONS_URL}?cid=${userId}&InstrumentID=${instrumentId}`
      : `${config.ETORO_API_PORTFOLIO_URL}?cid=${userId}`;

    logger.log('INFO', `[UPDATE] Fetching portfolio for user ${userId} (${userType} with url ${url})`);

    const response = await proxyManager.fetch(url, { headers: selectedHeader.header });

    if (!response || typeof response.text !== 'function') {
      logger.log('ERROR', `[UPDATE] Invalid or incomplete response received for user ${userId}`, { response });
      throw new Error(`Invalid response structure received from proxy for user ${userId}.`);
    }

    const responseBody = await response.text();

    if (responseBody.includes("user is PRIVATE")) {
      logger.log('WARN', `User ${userId} is private. Removing from future updates and decrementing block count.`);

      // Stop scheduling this user for future updates.
      batchManager.deleteFromTimestampBatch(userId, userType, instrumentId);

      // Both user types use the numeric block id format (e.g. "19000000") in
      // the counter documents; only the field path differs.
      const blockId = numericBlockId(userId);
      const incrementField = userType === 'speculator'
        ? `counts.${instrumentId}_${blockId}`
        : `counts.${blockId}`;

      const blockCountsRef = db.doc(userType === 'speculator'
        ? config.FIRESTORE_DOC_SPECULATOR_BLOCK_COUNTS
        : config.FIRESTORE_DOC_BLOCK_COUNTS);

      // Single immediate write; acceptable for this rare case.
      await blockCountsRef.set({ [incrementField]: FieldValue.increment(-1) }, { merge: true });

      return;
    }

    if (!response.ok) {
      throw new Error(`API Error: Status ${response.status}`);
    }
    wasSuccess = true;
    const portfolioData = JSON.parse(responseBody);

    const today = new Date().toISOString().slice(0, 10);

    // Portfolio storage intentionally uses the 'M' suffix format (e.g. "19M"),
    // unlike the orchestrator/counter documents which use the numeric format.
    const portfolioStorageBlockId = `${Math.floor(parseInt(userId, 10) / 1000000)}M`;

    await batchManager.addToPortfolioBatch(userId, portfolioStorageBlockId, today, portfolioData, userType, instrumentId);

    await batchManager.updateUserTimestamp(userId, userType, instrumentId);

    // Speculators must *also* be stamped in the 'SpeculatorBlocks' collection,
    // which is what the orchestrator reads. Queued via the batch manager so we
    // never block on an immediate commit here.
    if (userType === 'speculator') {
      const orchestratorBlockId = numericBlockId(userId);
      logger.log('INFO', `[UPDATE] Applying speculator timestamp fix for user ${userId} in block ${orchestratorBlockId}`);
      await batchManager.addSpeculatorTimestampFix(userId, orchestratorBlockId);
      logger.log('INFO', `[UPDATE] Speculator timestamp fix for user ${userId} queued.`);
    }

  } finally {
    if (selectedHeader) headerManager.updatePerformance(selectedHeader.id, wasSuccess);
  }
}

module.exports = { handleUpdate };