bulltrackers-module 1.0.2 → 1.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/functions/orchestrator/helpers/discovery_helpers.js +1 -1
- package/functions/task-engine/helpers/discovery_helpers.js +121 -0
- package/functions/task-engine/helpers/index.js +10 -0
- package/functions/task-engine/helpers/update_helpers.js +73 -0
- package/functions/task-engine/helpers/verify_helpers.js +100 -0
- package/functions/task-engine/index.js +11 -0
- package/functions/task-engine/utils/api_calls.js +0 -0
- package/functions/task-engine/utils/firestore_batch_manager.js +225 -0
- package/functions/task-engine/utils/firestore_ops.js +0 -0
- package/functions/task-engine/utils/index.js +6 -0
- package/index.js +9 -10
- package/package.json +1 -1
|
@@ -0,0 +1,121 @@
|
|
|
1
|
+
/**
 * @fileoverview Helper function for the 'discover' task.
 */
const { logger } = require("sharedsetup")(__filename);

/**
 * Handles the 'discover' task.
 *
 * Posts the candidate CIDs to the eToro rankings endpoint, filters the
 * returned public users down to recently-active ones, applies an extra
 * heuristic pre-filter when userType === 'speculator', reports invalid
 * CIDs to the invalid-speculator log topic, and chains the survivors
 * into a 'verify' task.
 *
 * @param {object} task The Pub/Sub task payload ({ cids, blockId, instrument, userType }).
 * @param {string} taskId A unique ID for logging.
 * @param {object} clients Contains { headerManager, proxyManager, pubsub, batchManager }.
 * @param {object} config The configuration object.
 */
async function handleDiscover(task, taskId, clients, config) {
  const { cids, blockId, instrument, userType } = task;
  const url = `${config.ETORO_API_RANKINGS_URL}?Period=LastTwoYears`;
  const selectedHeader = await clients.headerManager.selectHeader();
  if (!selectedHeader) throw new Error("Could not select a header.");

  let wasSuccess = false;
  try {
    if (userType === 'speculator') {
      // Mark these CIDs as processed up front so the batch manager can later
      // remove them from the pending-speculators documents on flush.
      clients.batchManager.addProcessedSpeculatorCids(cids);
      logger.log('INFO', `[DISCOVER] Added ${cids.length} speculator CIDs to the in-memory set to be flushed.`);
    }

    const { response } = await clients.proxyManager.fetch(url, {
      method: 'POST',
      headers: { ...selectedHeader.headers, 'Content-Type': 'application/json' },
      body: JSON.stringify(cids),
    });
    if (!response.ok) {
      throw new Error(`API status ${response.status}`);
    }
    wasSuccess = true;

    const publicUsers = await response.json();
    if (!Array.isArray(publicUsers)) {
      // FIX: was a silent return — log so an unexpected API payload is visible.
      logger.log('WARN', '[DISCOVER] Rankings API returned a non-array payload; aborting task.');
      return;
    }

    // --- REFACTORED FILTERING & CHAINING ---
    const oneMonthAgo = new Date();
    oneMonthAgo.setMonth(oneMonthAgo.getMonth() - 1);

    // FIX: guard against records missing a Value object — previously a single
    // malformed entry threw a TypeError and failed the whole task.
    const preliminaryActiveUsers = publicUsers.filter(user =>
      user?.Value &&
      new Date(user.Value.LastActivity) > oneMonthAgo &&
      user.Value.DailyGain !== 0 &&
      user.Value.Exposure !== 0 &&
      user.Value.RiskScore !== 0
    );

    let finalActiveUsers = [];
    const invalidCidsToLog = [];

    if (userType === 'speculator') {
      const publicUserCids = new Set(publicUsers.map(u => u.CID));

      // Any requested CID the API did not echo back is a private profile.
      if (publicUserCids.size > 0 && publicUserCids.size < cids.length) {
        const privateUserCids = cids.filter(cid => !publicUserCids.has(cid));
        invalidCidsToLog.push(...privateUserCids);
      }

      const activeUserCids = new Set(preliminaryActiveUsers.map(u => u.CID));
      const inactiveUserCids = publicUsers
        .filter(u => !activeUserCids.has(u.CID))
        .map(u => u.CID);
      invalidCidsToLog.push(...inactiveUserCids);

      logger.log('INFO', `[DISCOVER] Applying new speculator pre-filter to ${preliminaryActiveUsers.length} active users.`);
      const nonSpeculatorCids = [];

      for (const user of preliminaryActiveUsers) {
        const v = user.Value;
        const totalLeverage = (v.MediumLeveragePct || 0) + (v.HighLeveragePct || 0);

        // Heuristic: very high trade count, instrument breadth, leverage, or
        // weekly drawdown marks the user as a likely speculator.
        const isLikelySpeculator = (
          (v.Trades || 0) > 500 ||
          (v.TotalTradedInstruments || 0) > 50 ||
          totalLeverage > 50 ||
          (v.WeeklyDD || 0) < -25
        );

        if (isLikelySpeculator) {
          finalActiveUsers.push(user);
        } else {
          nonSpeculatorCids.push(user.CID);
        }
      }

      invalidCidsToLog.push(...nonSpeculatorCids);
      logger.log('INFO', `[DISCOVER] Pre-filter complete. ${finalActiveUsers.length} users passed. ${nonSpeculatorCids.length} users failed heuristic.`);

      if (invalidCidsToLog.length > 0) {
        await clients.pubsub.topic(config.PUBSUB_TOPIC_INVALID_SPECULATOR_LOG)
          .publishMessage({ json: { invalidCids: invalidCidsToLog } });
        logger.log('INFO', `[DISCOVER] Reported ${invalidCidsToLog.length} invalid (private, inactive, or failed heuristic) speculator IDs.`);
      }

    } else { // 'normal' users
      finalActiveUsers = preliminaryActiveUsers;
    }

    if (finalActiveUsers.length > 0) {
      const verificationTask = {
        type: 'verify',
        users: finalActiveUsers.map(u => ({ cid: u.CID, isBronze: u.Value.IsBronze })),
        blockId,
        instrument,
        userType
      };
      await clients.pubsub.topic(config.PUBSUB_TOPIC_USER_FETCH)
        .publishMessage({ json: verificationTask });
      logger.log('INFO', `[DISCOVER] Verification message published was : ${JSON.stringify(verificationTask)} `);
      logger.log('INFO', `[DISCOVER] Chaining to 'verify' task for ${finalActiveUsers.length} active users.`);
    }
    // --- END REFACTORED LOGIC ---

  } finally {
    // Always record header success/failure, even when the task throws.
    if (selectedHeader) clients.headerManager.updatePerformance(selectedHeader.id, wasSuccess);
  }
}

module.exports = { handleDiscover };
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
// bulltrackers-module/functions/task-engine/helpers/index.js
|
|
2
|
+
const { handleDiscover } = require('./discover_helpers');
|
|
3
|
+
const { handleVerify } = require('./verify_helpers');
|
|
4
|
+
const { handleUpdate } = require('./update_helpers');
|
|
5
|
+
|
|
6
|
+
module.exports = {
|
|
7
|
+
handleDiscover,
|
|
8
|
+
handleVerify,
|
|
9
|
+
handleUpdate,
|
|
10
|
+
};
|
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
/**
 * @fileoverview Helper function for the 'update' task.
 */
const { FieldValue } = require('@google-cloud/firestore');
const { logger } = require("sharedsetup")(__filename);

/**
 * Handles the 'update' task.
 *
 * Fetches the current portfolio (or per-instrument positions for
 * speculators) for one user, stages it into the Firestore batch manager,
 * and refreshes the user's last-update timestamp. Private users are
 * removed from future updates and their block count is decremented.
 *
 * @param {object} task The Pub/Sub task payload ({ userId, instrumentId, userType }).
 * @param {string} taskId A unique ID for logging.
 * @param {object} clients Contains { headerManager, proxyManager, nativeFirestore, batchManager }.
 * @param {object} config The configuration object.
 */
async function handleUpdate(task, taskId, clients, config) {
  const { userId, instrumentId, userType } = task;
  const selectedHeader = await clients.headerManager.selectHeader();
  if (!selectedHeader) throw new Error("Could not select a header.");

  // FIX: blockId was computed twice (private path and success path) with an
  // unqualified parseInt. Compute it once with an explicit radix so userId
  // strings cannot be misparsed and the two copies cannot drift apart.
  // Users are bucketed into 1M-wide CID blocks, e.g. CID 3_400_000 -> "3M".
  const blockId = `${Math.floor(Number.parseInt(userId, 10) / 1000000)}M`;

  let wasSuccess = false;
  try {
    const url = userType === 'speculator'
      ? `${config.ETORO_API_POSITIONS_URL}?cid=${userId}&InstrumentID=${instrumentId}`
      : `${config.ETORO_API_PORTFOLIO_URL}?cid=${userId}`;

    logger.log('INFO', `[UPDATE] Fetching portfolio for user ${userId} (${userType} with url ${url})`);
    const { response } = await clients.proxyManager.fetch(url, { headers: selectedHeader.headers });
    const responseBody = await response.text();

    // Checked before response.ok: the API reports private users in the body.
    if (responseBody.includes("user is PRIVATE")) {
      logger.log('WARN', `User ${userId} is private. Removing from future updates and decrementing block count.`);

      // Use the batch manager to delete the timestamp from the pending batch
      clients.batchManager.deleteFromTimestampBatch(userId, userType, instrumentId);

      const blockCountsRef = clients.nativeFirestore.doc(userType === 'speculator'
        ? config.FIRESTORE_DOC_SPECULATOR_BLOCK_COUNTS
        : config.FIRESTORE_DOC_BLOCK_COUNTS);

      const incrementField = userType === 'speculator'
        ? `counts.${instrumentId}_${blockId}`
        : `counts.${blockId}`;

      await blockCountsRef.set({ [incrementField]: FieldValue.increment(-1) }, { merge: true });

      // NOTE(review): wasSuccess stays false here, so the header is penalized
      // even though the HTTP exchange itself worked — confirm this is intended.
      return;
    }

    if (!response.ok) {
      throw new Error(`API Error: Status ${response.status}`);
    }
    wasSuccess = true;
    const portfolioData = JSON.parse(responseBody);

    const today = new Date().toISOString().slice(0, 10);

    // Use the batch manager to add portfolio data
    await clients.batchManager.addToPortfolioBatch(userId, blockId, today, portfolioData, userType, instrumentId);

    // --- DEPRECATED LOGIC ---
    // The PreCompute system is deprecated. All related logic is removed.
    // --- END DEPRECATED LOGIC ---

    // Use the batch manager to update the timestamp
    await clients.batchManager.updateUserTimestamp(userId, userType, instrumentId);

  } finally {
    if (selectedHeader) clients.headerManager.updatePerformance(selectedHeader.id, wasSuccess);
  }
}

module.exports = { handleUpdate };
|
|
@@ -0,0 +1,100 @@
|
|
|
1
|
+
/**
 * @fileoverview Helper function for the 'verify' task.
 */
const { FieldValue } = require('@google-cloud/firestore');
const { logger } = require("sharedsetup")(__filename);

/**
 * Handles the 'verify' task.
 *
 * Fetches each candidate user's portfolio. Speculators must hold at least
 * one instrument from the configured speculator set to pass; normal users
 * pass once their portfolio is readable. Verified users and their bronze
 * state are written to the relevant block documents in a single batch,
 * and the block's user count is incremented.
 *
 * @param {object} task The Pub/Sub task payload ({ users, blockId, instrument, userType }).
 * @param {string} taskId A unique ID for logging.
 * @param {object} clients Contains { headerManager, proxyManager, nativeFirestore }.
 * @param {object} config The configuration object.
 */
async function handleVerify(task, taskId, clients, config) {
  const { users, blockId, instrument, userType } = task;
  const batch = clients.nativeFirestore.batch();
  let validUserCount = 0;
  const speculatorUpdates = {};
  const normalUserUpdates = {};
  const bronzeStateUpdates = {};

  // Create the set from the config array for efficient lookups
  const speculatorInstrumentSet = new Set(config.SPECULATOR_INSTRUMENTS_ARRAY);

  // Sequential on purpose: a fresh header is selected per user and its
  // performance recorded, so the requests are not parallelized.
  for (const user of users) {
    const userId = user.cid;

    const portfolioUrl = `${config.ETORO_API_PORTFOLIO_URL}?cid=${userId}`;
    const selectedHeader = await clients.headerManager.selectHeader();
    if (!selectedHeader) continue;

    let wasSuccess = false;
    try {
      const { response } = await clients.proxyManager.fetch(portfolioUrl, { headers: selectedHeader.headers });
      if (!response.ok) continue;
      wasSuccess = true;

      const portfolioData = await response.json();

      if (userType === 'speculator') {
        // FIX: guard missing AggregatedPositions — previously one user with
        // an unexpected payload threw and aborted verification of the rest.
        const matchingInstruments = (portfolioData.AggregatedPositions || [])
          .map(p => p.InstrumentID)
          .filter(id => speculatorInstrumentSet.has(id)); // Use the Set

        if (matchingInstruments.length > 0) {
          speculatorUpdates[`users.${userId}`] = {
            instruments: matchingInstruments,
            lastVerified: new Date(),
            lastHeldSpeculatorAsset: new Date()
          };
          bronzeStateUpdates[userId] = user.isBronze;
          logger.log('INFO', `[VERIFY] Speculator user ${userId} holds speculator assets: ${matchingInstruments.join(', ')}`);
          validUserCount++;
        }
        else {
          logger.log('INFO', `[VERIFY] Speculator user ${userId} does not hold any speculator assets.`);
        }
      } else { // Normal user verification
        normalUserUpdates[`users.${userId}`] = {
          lastVerified: new Date()
        };
        bronzeStateUpdates[userId] = user.isBronze;
        validUserCount++;
      }
    } finally {
      if (selectedHeader) clients.headerManager.updatePerformance(selectedHeader.id, wasSuccess);
    }
  }

  if (Object.keys(speculatorUpdates).length > 0 || Object.keys(normalUserUpdates).length > 0) {
    if (userType === 'speculator') {
      const speculatorBlockRef = clients.nativeFirestore.collection(config.FIRESTORE_COLLECTION_SPECULATOR_BLOCKS).doc(String(blockId));
      batch.set(speculatorBlockRef, speculatorUpdates, { merge: true });
      const bronzeStateRef = clients.nativeFirestore.collection(config.FIRESTORE_COLLECTION_BRONZE_SPECULATORS).doc(String(blockId));
      batch.set(bronzeStateRef, bronzeStateUpdates, {merge: true});

    } else {
      const normalBlockRef = clients.nativeFirestore.collection(config.FIRESTORE_COLLECTION_NORMAL_PORTFOLIOS).doc(String(blockId));
      batch.set(normalBlockRef, normalUserUpdates, { merge: true });
      const bronzeStateRef = clients.nativeFirestore.collection(config.FIRESTORE_COLLECTION_BRONZE_NORMAL).doc(String(blockId));
      batch.set(bronzeStateRef, bronzeStateUpdates, {merge: true});
    }

    if (validUserCount > 0) {
      const blockCountsRef = clients.nativeFirestore.doc(userType === 'speculator'
        ? config.FIRESTORE_DOC_SPECULATOR_BLOCK_COUNTS
        : config.FIRESTORE_DOC_BLOCK_COUNTS);

      const incrementField = userType === 'speculator' ? `counts.${instrument}_${blockId}` : `counts.${blockId}`;
      batch.set(blockCountsRef, { [incrementField]: FieldValue.increment(validUserCount) }, { merge: true });
    }
  }

  // Committing an empty batch is a no-op, so no guard is needed here.
  await batch.commit();
  if(validUserCount > 0) {
    logger.log('INFO', `[VERIFY] Verified and stored ${validUserCount} new ${userType} users.`);
  }
}

module.exports = { handleVerify };
|
|
File without changes
|
|
@@ -0,0 +1,225 @@
|
|
|
1
|
+
/**
 * @fileoverview Utility class to manage all stateful Firestore write batches
 * for the Task Engine. This includes sharding portfolios, batching timestamp
 * updates, and flushing processed speculator CIDs from the pending list.
 */

const { FieldValue } = require('@google-cloud/firestore');
const { logger } = require("sharedsetup")(__filename);

class FirestoreBatchManager {
  /**
   * @param {Firestore} firestore A Firestore instance.
   * @param {IntelligentHeaderManager} headerManager An IntelligentHeaderManager instance.
   * @param {object} config Configuration object.
   */
  constructor(firestore, headerManager, config) {
    this.firestore = firestore;
    this.headerManager = headerManager;
    this.config = config; // { TASK_ENGINE_MAX_BATCH_SIZE, TASK_ENGINE_FLUSH_INTERVAL_MS, TASK_ENGINE_MAX_USERS_PER_SHARD, ...collectionNames }

    // portfolioBatch: { "<collection>/<blockId>/snapshots/<date>": { userId: portfolioData } }
    this.portfolioBatch = {};
    // timestampBatch: { "<collection>/<docId>": { timestampKey: Date } }
    this.timestampBatch = {};
    // CIDs already handled by a 'discover' task, pending removal from the
    // pending-speculators documents on the next flush.
    this.processedSpeculatorCids = new Set();
    // Handle of the pending setTimeout; null when no flush is scheduled.
    this.batchTimeout = null;

    logger.log('INFO', 'FirestoreBatchManager initialized.');
  }

  /**
   * Schedules a flush if one isn't already scheduled.
   */
  _scheduleFlush() {
    if (!this.batchTimeout) {
      this.batchTimeout = setTimeout(
        () => this.flushBatches(),
        this.config.TASK_ENGINE_FLUSH_INTERVAL_MS
      );
    }
  }

  /**
   * Adds a portfolio to the batch. Triggers an immediate flush once the
   * total number of buffered users reaches TASK_ENGINE_MAX_BATCH_SIZE;
   * otherwise ensures a deferred flush is scheduled.
   * @param {string} userId
   * @param {string} blockId
   * @param {string} date
   * @param {object} portfolioData
   * @param {string} userType
   * @param {string|null} instrumentId
   */
  async addToPortfolioBatch(userId, blockId, date, portfolioData, userType, instrumentId = null) {
    const collection = userType === 'speculator'
      ? this.config.FIRESTORE_COLLECTION_SPECULATOR_PORTFOLIOS
      : this.config.FIRESTORE_COLLECTION_NORMAL_PORTFOLIOS;
    const basePath = `${collection}/${blockId}/snapshots/${date}`;

    if (!this.portfolioBatch[basePath]) {
      this.portfolioBatch[basePath] = {};
    }

    this.portfolioBatch[basePath][userId] = portfolioData;

    // Count users across ALL base paths, not just this one.
    const totalUsersInBatch = Object.values(this.portfolioBatch).reduce((sum, users) => sum + Object.keys(users).length, 0);

    if (totalUsersInBatch >= this.config.TASK_ENGINE_MAX_BATCH_SIZE) {
      await this.flushBatches();
    } else {
      this._scheduleFlush();
    }
  }

  /**
   * Adds a user timestamp update to the batch.
   * @param {string} userId
   * @param {string} userType
   * @param {string|null} instrumentId
   */
  async updateUserTimestamp(userId, userType, instrumentId = null) {
    const collection = userType === 'speculator'
      ? this.config.FIRESTORE_COLLECTION_SPECULATOR_PORTFOLIOS
      : this.config.FIRESTORE_COLLECTION_NORMAL_PORTFOLIOS;
    const docId = userType === 'speculator' ? 'speculators' : 'normal';
    const docPath = `${collection}/${docId}`;

    if (!this.timestampBatch[docPath]) {
      this.timestampBatch[docPath] = {};
    }

    // Speculator timestamps are keyed per (user, instrument) pair.
    const timestampKey = userType === 'speculator' ? `${userId}_${instrumentId}` : userId;
    this.timestampBatch[docPath][timestampKey] = new Date();

    if (Object.keys(this.timestampBatch[docPath]).length >= this.config.TASK_ENGINE_MAX_BATCH_SIZE) {
      await this.flushBatches();
    } else {
      this._scheduleFlush();
    }
  }

  /**
   * Removes a user timestamp from the batch (e.g., if user is private).
   * Only affects entries not yet flushed; already-written timestamps are
   * untouched.
   * @param {string} userId
   * @param {string} userType
   * @param {string|null} instrumentId
   */
  deleteFromTimestampBatch(userId, userType, instrumentId) {
    const collection = userType === 'speculator'
      ? this.config.FIRESTORE_COLLECTION_SPECULATOR_PORTFOLIOS
      : this.config.FIRESTORE_COLLECTION_NORMAL_PORTFOLIOS;
    const docId = userType === 'speculator' ? 'speculators' : 'normal';
    const docPath = `${collection}/${docId}`;

    if (this.timestampBatch[docPath]) {
      const timestampKey = userType === 'speculator' ? `${userId}_${instrumentId}` : userId;
      delete this.timestampBatch[docPath][timestampKey];
    }
  }

  /**
   * Adds discovered speculator CIDs to the in-memory set for later deletion.
   * @param {Array<string>} cids
   */
  addProcessedSpeculatorCids(cids) {
    cids.forEach(cid => this.processedSpeculatorCids.add(cid));
  }

  /**
   * Flushes all pending writes to Firestore and updates header performance.
   * Clears any scheduled timer first, then stages sharded portfolio writes
   * and timestamp writes into one batch (splitting near Firestore's 500-op
   * batch limit), removes processed speculator CIDs from the pending-list
   * documents, and awaits all commits.
   */
  async flushBatches() {
    if (this.batchTimeout) {
      clearTimeout(this.batchTimeout);
      this.batchTimeout = null;
    }

    const promises = [];

    // --- REVISED SHARDING AND FLUSH LOGIC ---
    const firestoreBatch = this.firestore.batch();
    let batchOperationCount = 0;

    for (const basePath in this.portfolioBatch) {
      const userPortfolios = this.portfolioBatch[basePath];
      const userIds = Object.keys(userPortfolios);

      if (userIds.length > 0) {
        // Split users into fixed-size shard documents (part_0, part_1, ...)
        // under <basePath>/parts to keep each document below size limits.
        for (let i = 0; i < userIds.length; i += this.config.TASK_ENGINE_MAX_USERS_PER_SHARD) {
          const chunkUserIds = userIds.slice(i, i + this.config.TASK_ENGINE_MAX_USERS_PER_SHARD);
          const shardIndex = Math.floor(i / this.config.TASK_ENGINE_MAX_USERS_PER_SHARD);

          const chunkData = {};
          chunkUserIds.forEach(userId => {
            chunkData[userId] = userPortfolios[userId];
          });

          const docRef = this.firestore.collection(`${basePath}/parts`).doc(`part_${shardIndex}`);
          firestoreBatch.set(docRef, chunkData, { merge: true });
          batchOperationCount++;
        }
        logger.log('INFO', `[BATCH] Staged ${userIds.length} users into ${Math.ceil(userIds.length / this.config.TASK_ENGINE_MAX_USERS_PER_SHARD)} shards for ${basePath}.`);
      }
      delete this.portfolioBatch[basePath];
    }

    // Process timestamp updates
    for (const docPath in this.timestampBatch) {
      if (Object.keys(this.timestampBatch[docPath]).length > 0) {
        const [collection] = docPath.split('/');
        // NOTE(review): the docId half of docPath ('speculators'/'normal') is
        // dropped here and the target doc is hardcoded to .doc('normal') —
        // confirm speculator timestamps are meant to land on the same doc id.
        const docRef = this.firestore.collection(collection).doc('timestamps').collection('users').doc('normal');

        if (batchOperationCount < 450) { // Keep buffer
          firestoreBatch.set(docRef, { users: this.timestampBatch[docPath] }, { merge: true });
          batchOperationCount++;
        } else {
          // Near Firestore's 500-operation batch limit: commit the current
          // batch and start a fresh one for this write.
          promises.push(firestoreBatch.commit());
          const newBatch = this.firestore.batch();
          newBatch.set(docRef, { users: this.timestampBatch[docPath] }, { merge: true });
          promises.push(newBatch.commit());
          batchOperationCount = 1; // Reset count
        }
        delete this.timestampBatch[docPath];
      }
    }

    // --- Logic for flushing processed speculator CIDs ---
    if (this.processedSpeculatorCids.size > 0) {
      logger.log('INFO', `[BATCH] Flushing ${this.processedSpeculatorCids.size} processed speculator CIDs from pending list documents.`);
      const cidsToDelete = Array.from(this.processedSpeculatorCids);
      this.processedSpeculatorCids.clear(); // Clear immediately

      const pendingDocsSnapshot = await this.firestore.collection(this.config.PENDING_SPECULATORS_COLLECTION).get();

      if (!pendingDocsSnapshot.empty) {
        const deletePromises = [];
        pendingDocsSnapshot.forEach(doc => {
          const docData = doc.data().users || {};
          const cidsInThisDoc = cidsToDelete.filter(cid => docData.hasOwnProperty(cid));

          if (cidsInThisDoc.length > 0) {
            // One batch per pending document; each deletes only the CIDs
            // actually present in that document's users map.
            const deleteBatch = this.firestore.batch();
            const updates = {};
            cidsInThisDoc.forEach(cid => {
              updates[`users.${cid}`] = FieldValue.delete();
            });
            deleteBatch.update(doc.ref, updates);
            deletePromises.push(deleteBatch.commit());
            logger.log('INFO', `[BATCH] Staged deletion of ${cidsInThisDoc.length} CIDs from document: ${doc.id}`);
          }
        });
        promises.push(...deletePromises);
      }
    }
    // --- End new logic ---

    if (batchOperationCount > 0) {
      promises.push(firestoreBatch.commit());
    }

    // Flush header performance updates
    promises.push(this.headerManager.flushPerformanceUpdates());

    await Promise.all(promises);
    logger.log('INFO', '[BATCH] All batches flushed successfully.');
  }
}

module.exports = { FirestoreBatchManager };
|
|
File without changes
|
package/index.js
CHANGED
|
@@ -1,16 +1,15 @@
|
|
|
1
1
|
/**
|
|
2
|
-
* @fileoverview Main entry point for the
|
|
3
|
-
*
|
|
2
|
+
* @fileoverview Main entry point for the Bulltrackers shared module.
|
|
3
|
+
* This module consolidates core utilities and abstracted function logic
|
|
4
|
+
* to be shared across multiple Cloud Functions.
|
|
4
5
|
*/
|
|
5
6
|
|
|
6
|
-
const
|
|
7
|
-
const
|
|
7
|
+
const core = require('./functions/core/utils');
|
|
8
|
+
const Orchestrator = require('./functions/orchestrator');
|
|
9
|
+
const TaskEngine = require('./functions/task-engine'); // <-- ADD THIS
|
|
8
10
|
|
|
9
11
|
module.exports = {
|
|
10
|
-
core
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
},
|
|
14
|
-
Orchestrator: orchestrator,
|
|
15
|
-
// Add other function modules here as they are refactored (e.g., TaskEngine, ComputationSystem)
|
|
12
|
+
core,
|
|
13
|
+
Orchestrator,
|
|
14
|
+
TaskEngine, // <-- AND ADD THIS
|
|
16
15
|
};
|