bulltrackers-module 1.0.64 → 1.0.65
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,6 +2,8 @@
  * @fileoverview Main pipe for the Task Engine.
  * REFACTORED: This file is now 'handler_creator.js' in name only.
  * It exports the main 'handleRequest' pipe function.
+ * OPTIMIZED: Removed 'batchManager.flushBatches()' from 'finally' block
+ * to enable true asynchronous batching.
  */

 // --- 1. REMOVE the circular require ---
@@ -21,6 +23,7 @@ const { handleUpdate } = require('./helpers/update_helpers');
  * @param {object} dependencies - Contains all clients: db, pubsub, logger, headerManager, proxyManager, batchManager.
  */
 async function handleRequest(message, context, config, dependencies) {
+  // --- OPTIMIZATION: Destructure headerManager here ---
   const { logger, batchManager, headerManager } = dependencies;

   if (!message || !message.data) {
@@ -65,13 +68,16 @@ async function handleRequest(message, context, config, dependencies) {
     // Let's rely on sub-pipes' finally blocks.
   } finally {
     try {
-      //
-
-
+      // --- OPTIMIZATION START ---
+      // The FirestoreBatchManager will flush itself on its own timer.
+      // We no longer call `await batchManager.flushBatches()` here.
+      // We *only* need to flush the header performance updates at the end of the task.
+      await headerManager.flushPerformanceUpdates();
+      logger.log('INFO', `[TaskEngine/${taskId}] Final header performance flush complete.`);
+      // --- OPTIMIZATION END ---
     } catch (flushError) {
       logger.log('ERROR', `[TaskEngine/${taskId}] Error during final flush attempt.`, { error: flushError.message });
     }
-    // Note: Flushing headerManager is handled by batchManager
   }
 }

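The handler now relies on the FirestoreBatchManager flushing on its own timer instead of a blocking `flushBatches()` call in the `finally` block. The timer logic itself is outside this diff (only a `_scheduleFlush()` call appears further down), so the following is a hedged sketch of how such a debounced flush is typically wired; the interval constant and method bodies are assumptions, not the package's actual implementation:

```js
// Hypothetical sketch of a debounced, timer-driven flush; the real
// FirestoreBatchManager internals are not part of this diff.
class TimedBatchManager {
  constructor(logger, flushIntervalMs = 2000) { // interval is an assumed value
    this.logger = logger;
    this.flushIntervalMs = flushIntervalMs;
    this.batchTimeout = null; // mirrors `this.batchTimeout = null` in the constructor diff below
  }

  _scheduleFlush() {
    if (this.batchTimeout) return; // a flush is already pending
    this.batchTimeout = setTimeout(async () => {
      this.batchTimeout = null;
      try {
        await this.flushBatches(); // commit whatever has been queued so far
      } catch (err) {
        this.logger.log('ERROR', 'Scheduled flush failed.', { error: err.message });
      }
    }, this.flushIntervalMs);
  }

  async flushBatches() {
    // ...commit queued Firestore writes here...
  }
}
```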
@@ -1,6 +1,7 @@
 /**
  * @fileoverview Sub-pipe: pipe.taskEngine.handleUpdate
  * REFACTORED: Now stateless and receives dependencies.
+ * OPTIMIZED: Removed immediate batch commit for speculator timestamp fix.
  */
 const { FieldValue } = require('@google-cloud/firestore');

@@ -69,6 +70,7 @@ async function handleUpdate(task, taskId, dependencies, config) {
     : config.FIRESTORE_DOC_BLOCK_COUNTS);
   // <<< END FIX for private user blockId format >>>

+  // This is a single, immediate write, which is fine for this rare case.
   await blockCountsRef.set({ [incrementField]: FieldValue.increment(-1) }, { merge: true });

   return;
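The decrement above relies on `FieldValue.increment`, which applies the arithmetic server-side with no read-modify-write cycle, and `{ merge: true }` creates the document and field if they do not exist yet. A standalone sketch of the same pattern; the collection path here is illustrative, not the package's real one:

```js
const { Firestore, FieldValue } = require('@google-cloud/firestore');

const db = new Firestore();

// Atomically decrement a per-block counter. If the field is missing,
// increment(-1) initializes it to -1 instead of failing.
async function decrementBlockCount(blockId, incrementField) {
  const blockCountsRef = db.collection('blockCounts').doc(blockId); // illustrative path
  await blockCountsRef.set(
    { [incrementField]: FieldValue.increment(-1) },
    { merge: true }
  );
}
```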
@@ -82,7 +84,7 @@ async function handleUpdate(task, taskId, dependencies, config) {

   const today = new Date().toISOString().slice(0, 10);

-  // <<< START FULL CODE FIX >>>
+  // <<< START OPTIMIZATION / FULL CODE FIX >>>

   // BUG 1: The blockId format is different for portfolio storage ('1M')
   // vs. orchestrator logic ('1000000').
@@ -91,9 +93,7 @@ async function handleUpdate(task, taskId, dependencies, config) {
   // This call is correct for storing portfolio data (which uses the 'M' format)
   await batchManager.addToPortfolioBatch(userId, portfolioStorageBlockId, today, portfolioData, userType, instrumentId);

-  // This call is correct for 'normal' users
-  // it writes to a timestamp doc that the orchestrator *does not read*.
-  // We still call it to update 'normal' users correctly.
+  // This call is correct for 'normal' users.
   await batchManager.updateUserTimestamp(userId, userType, instrumentId);

   // BUG 2 (The Main Problem): We must *also* update the 'SpeculatorBlocks'
@@ -103,22 +103,13 @@ async function handleUpdate(task, taskId, dependencies, config) {

       logger.log('INFO', `[UPDATE] Applying speculator timestamp fix for user ${userId} in block ${orchestratorBlockId}`);

-      //
-      // This
-
-
-
-      // These are the two fields the orchestrator and cleanup functions read
-      const updates = {
-        [`users.${userId}.lastVerified`]: new Date(),
-        [`users.${userId}.lastHeldSpeculatorAsset`]: new Date()
-      };
-
-      fixBatch.set(speculatorBlockRef, updates, { merge: true });
-      await fixBatch.commit();
-      logger.log('INFO', `[UPDATE] Speculator timestamp fix for user ${userId} committed.`);
+      // --- OPTIMIZATION: Use the batch manager instead of committing immediately ---
+      // This queues the write and allows the function to return,
+      // instead of blocking on `await fixBatch.commit()`.
+      await batchManager.addSpeculatorTimestampFix(userId, orchestratorBlockId);
+      logger.log('INFO', `[UPDATE] Speculator timestamp fix for user ${userId} queued.`);
     }
-    // <<< END FULL CODE FIX >>>
+    // <<< END OPTIMIZATION / FULL CODE FIX >>>

   } finally {
     if (selectedHeader) headerManager.updatePerformance(selectedHeader.id, wasSuccess);
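For contrast with the queued call above, the deleted lines amount to the standard Firestore `WriteBatch` flow: build a batch, stage one merged `set()`, and block on `commit()`. This is a sketch reconstructed from the removed code; the `SpeculatorBlocks` collection name and the `db` wiring are assumptions:

```js
// Sketch of the old blocking path removed by this diff. `db` is an initialized
// @google-cloud/firestore Firestore instance; the collection name is assumed
// from the 'SpeculatorBlocks' comments elsewhere in the diff.
async function commitSpeculatorFixImmediately(db, userId, orchestratorBlockId) {
  const speculatorBlockRef = db.collection('SpeculatorBlocks').doc(orchestratorBlockId);
  const fixBatch = db.batch();
  fixBatch.set(
    speculatorBlockRef,
    {
      [`users.${userId}.lastVerified`]: new Date(),
      [`users.${userId}.lastHeldSpeculatorAsset`]: new Date(),
    },
    { merge: true }
  );
  await fixBatch.commit(); // blocks until Firestore acknowledges the write
}
```

`addSpeculatorTimestampFix` replaces this with an in-memory queue mutation plus a scheduled flush, so `handleUpdate` no longer waits on a Firestore round-trip per fix.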
@@ -1,9 +1,81 @@
 /**
  * @fileoverview Sub-pipe: pipe.taskEngine.handleVerify
  * REFACTORED: Now stateless and receives dependencies.
+ * OPTIMIZED: Fetches all user portfolios in parallel to reduce function runtime.
  */
 const { FieldValue } = require('@google-cloud/firestore');

+/**
+ * Internal helper to fetch and process a single user's portfolio.
+ * This allows the main function to run these in parallel.
+ * @param {object} user - The user object { cid, isBronze }
+ * @param {object} dependencies - Contains proxyManager, headerManager
+ * @param {object} config - The configuration object
+ * @param {Set<number>} speculatorInstrumentSet - Pre-built Set of instrument IDs
+ * @returns {Promise<object|null>} A result object for batching, or null on failure.
+ */
+async function fetchAndVerifyUser(user, dependencies, config, speculatorInstrumentSet) {
+  const { logger, headerManager, proxyManager } = dependencies;
+  const userId = user.cid;
+
+  const portfolioUrl = `${config.ETORO_API_PORTFOLIO_URL}?cid=${userId}`;
+  const selectedHeader = await headerManager.selectHeader();
+  if (!selectedHeader) return null; // Cannot fetch
+
+  let wasSuccess = false;
+  try {
+    const response = await proxyManager.fetch(portfolioUrl, { headers: selectedHeader.header });
+    if (!response.ok) {
+      wasSuccess = false;
+      return null; // API error or private user
+    }
+    wasSuccess = true;
+
+    const portfolioData = await response.json();
+
+    // Process verification logic *within* the parallel task
+    if (config.userType === 'speculator') {
+      const matchingInstruments = portfolioData.AggregatedPositions
+        .map(p => p.InstrumentID)
+        .filter(id => speculatorInstrumentSet.has(id));
+
+      if (matchingInstruments.length > 0) {
+        logger.log('INFO', `[VERIFY] Speculator user ${userId} holds assets: ${matchingInstruments.join(', ')}`);
+        // Return data needed for the speculator batch update
+        return {
+          type: 'speculator',
+          userId: userId,
+          isBronze: user.isBronze,
+          updateData: {
+            instruments: matchingInstruments,
+            lastVerified: new Date(),
+            lastHeldSpeculatorAsset: new Date()
+          }
+        };
+      } else {
+        logger.log('INFO', `[VERIFY] Speculator user ${userId} does not hold any speculator assets.`);
+        return null; // Valid fetch, but failed verification
+      }
+    } else { // Normal user verification
+      // Return data needed for the normal user batch update
+      return {
+        type: 'normal',
+        userId: userId,
+        isBronze: user.isBronze,
+        updateData: {
+          lastVerified: new Date()
+        }
+      };
+    }
+  } catch (error) {
+    logger.log('WARN', `[VERIFY] Error processing user ${userId}`, { errorMessage: error.message });
+    return null;
+  } finally {
+    if (selectedHeader) headerManager.updatePerformance(selectedHeader.id, wasSuccess);
+  }
+}
+
+
 /**
  * Sub-pipe: pipe.taskEngine.handleVerify
  * @param {object} task The Pub/Sub task payload.
@@ -12,7 +84,7 @@ const { FieldValue } = require('@google-cloud/firestore');
  * @param {object} config The configuration object.
  */
 async function handleVerify(task, taskId, dependencies, config) {
-  const { logger,
+  const { logger, db } = dependencies;
   const { users, blockId, instrument, userType } = task;

   // Use db from dependencies
@@ -20,55 +92,43 @@ async function handleVerify(task, taskId, dependencies, config) {
   let validUserCount = 0;
   const speculatorUpdates = {};
   const normalUserUpdates = {};
-  const bronzeStateUpdates = {};
+  const bronzeStateUpdates = {}; // This can be one object

   const speculatorInstrumentSet = new Set(config.SPECULATOR_INSTRUMENTS_ARRAY);

-
-
-
-
-
-
+  // --- OPTIMIZATION: Run all user fetches in parallel ---
+  const verificationPromises = users.map(user =>
+    fetchAndVerifyUser(
+      user,
+      dependencies,
+      { ...config, userType: userType }, // Pass userType into the helper config
+      speculatorInstrumentSet
+    )
+  );

-
-
-    // Use proxyManager from dependencies
-    const response = await proxyManager.fetch(portfolioUrl, { headers: selectedHeader.header });
-    if (!response.ok) continue;
-    wasSuccess = true;
-
-    const portfolioData = await response.json();
+  const results = await Promise.allSettled(verificationPromises);
+  // --- END OPTIMIZATION ---

-
-
-
-
+  // Process results (this is fast, no I/O)
+  results.forEach(result => {
+    if (result.status === 'rejected' || !result.value) {
+      // Log rejection reason if needed: result.reason
+      return;
+    }

-
-
-
-
-
-
-
-
-
-
-      else {
-        logger.log('INFO', `[VERIFY] Speculator user ${userId} does not hold any speculator assets.`);
-      }
-    } else { // Normal user verification
-      normalUserUpdates[`users.${userId}`] = {
-        lastVerified: new Date()
-      };
-      bronzeStateUpdates[userId] = user.isBronze;
-      validUserCount++;
-    }
-  } finally {
-    if (selectedHeader) headerManager.updatePerformance(selectedHeader.id, wasSuccess);
+    const data = result.value;
+
+    if (data.type === 'speculator') {
+      speculatorUpdates[`users.${data.userId}`] = data.updateData;
+      bronzeStateUpdates[data.userId] = data.isBronze;
+      validUserCount++;
+    } else if (data.type === 'normal') {
+      normalUserUpdates[`users.${data.userId}`] = data.updateData;
+      bronzeStateUpdates[data.userId] = data.isBronze;
+      validUserCount++;
     }
-  }
+  });
+

   if (Object.keys(speculatorUpdates).length > 0 || Object.keys(normalUserUpdates).length > 0) {
     if (userType === 'speculator') {
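The heart of this change is swapping a sequential await-per-user loop for `Promise.allSettled`, which starts every fetch immediately and never lets one rejection abort the rest, unlike `Promise.all`. A minimal, self-contained illustration of the difference:

```js
// Each settled result is { status: 'fulfilled', value } or { status: 'rejected', reason },
// so slow or failing tasks never short-circuit the batch.
async function demo() {
  const tasks = [
    Promise.resolve('user A verified'),
    Promise.reject(new Error('user B fetch failed')),
    Promise.resolve('user C verified'),
  ];

  const results = await Promise.allSettled(tasks);
  for (const result of results) {
    if (result.status === 'fulfilled') console.log(result.value);
    else console.warn(result.reason.message); // handled, not thrown
  }
  // Promise.all(tasks) would instead reject with 'user B fetch failed'
  // and discard the results for users A and C.
}

demo();
```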
@@ -100,4 +160,4 @@
   }
 }

-module.exports = { handleVerify };
+module.exports = { handleVerify };
@@ -1,6 +1,7 @@
 /**
  * @fileoverview Utility class to manage stateful Firestore write batches.
  * REFACTORED: Renamed 'firestore' to 'db' for consistency.
+ * OPTIMIZED: Added logic to handle speculator timestamp fixes within the batch.
  */

 const { FieldValue } = require('@google-cloud/firestore');
@@ -22,6 +23,7 @@ class FirestoreBatchManager {
     this.portfolioBatch = {};
     this.timestampBatch = {};
     this.processedSpeculatorCids = new Set();
+    this.speculatorTimestampFixBatch = {}; // <-- OPTIMIZATION: ADD THIS
     this.batchTimeout = null;

     this.logger.log('INFO', 'FirestoreBatchManager initialized.');
@@ -123,6 +125,28 @@ class FirestoreBatchManager {
     cids.forEach(cid => this.processedSpeculatorCids.add(cid));
   }

+  /**
+   * --- OPTIMIZATION: ADD THIS NEW METHOD ---
+   * Adds a speculator timestamp fix to the batch.
+   * @param {string} userId
+   * @param {string} orchestratorBlockId (e.g., "1000000")
+   */
+  async addSpeculatorTimestampFix(userId, orchestratorBlockId) {
+    const docPath = `${this.config.FIRESTORE_COLLECTION_SPECULATOR_BLOCKS}/${orchestratorBlockId}`;
+
+    if (!this.speculatorTimestampFixBatch[docPath]) {
+      this.speculatorTimestampFixBatch[docPath] = {};
+    }
+
+    // These are the two fields the orchestrator reads
+    this.speculatorTimestampFixBatch[docPath][`users.${userId}.lastVerified`] = new Date();
+    this.speculatorTimestampFixBatch[docPath][`users.${userId}.lastHeldSpeculatorAsset`] = new Date();
+
+    this.logger.log('TRACE', `[BATCH] Queued speculator timestamp fix for user ${userId} in block ${orchestratorBlockId}`);
+    this._scheduleFlush(); // Schedule a flush
+  }
+  // --- END NEW METHOD ---
+
   /**
    * Flushes all pending writes to Firestore and updates header performance.
    */
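Since the queue is keyed by document path, multiple fixes landing in the same block merge into a single pending `set()` by flush time. A hedged usage sketch; the constructor arguments are assumptions, as the manager's instantiation is not shown in this diff:

```js
// Hypothetical wiring; the real constructor arguments are not part of this diff.
const manager = new FirestoreBatchManager(db, logger, config);

// Three queued fixes, but only two pending document writes: the two
// block-1000000 users share one docPath entry and will be written
// in a single merged set() when the scheduled flush fires.
await manager.addSpeculatorTimestampFix('user-123', '1000000');
await manager.addSpeculatorTimestampFix('user-456', '1000000');
await manager.addSpeculatorTimestampFix('user-789', '2000000');
```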
@@ -206,6 +230,20 @@ class FirestoreBatchManager {
       }
     }

+    // --- OPTIMIZATION: ADD THIS LOOP ---
+    for (const docPath in this.speculatorTimestampFixBatch) {
+      const updates = this.speculatorTimestampFixBatch[docPath];
+      if (Object.keys(updates).length > 0) {
+        const docRef = this.db.doc(docPath);
+
+        // Add to the existing batch
+        firestoreBatch.set(docRef, updates, { merge: true });
+        batchOperationCount++;
+      }
+      delete this.speculatorTimestampFixBatch[docPath];
+    }
+    // --- END OPTIMIZATION ---
+
     if (batchOperationCount > 0) {
       promises.push(firestoreBatch.commit());
     }
@@ -217,4 +255,4 @@
   }
 }

-module.exports = { FirestoreBatchManager };
+module.exports = { FirestoreBatchManager };