bulltrackers-module 1.0.179 → 1.0.181
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3,6 +3,7 @@
  * (REFACTORED: Removed all concurrency from `handleUpdate` and `lookupUsernames`)
  * (REFACTORED: Added node-fetch fallback for all API calls)
  * (REFACTORED: Added verbose, user-centric logging for all operations)
+ * (FIXED: Corrected variable name 'instId' to 'instrumentId' in final timestamp loops)
  */
 
 const { FieldValue } = require('@google-cloud/firestore');
@@ -174,9 +175,12 @@ async function handleUpdate(task, taskId, { logger, headerManager, proxyManager,
   }
 
   // --- 5. Handle Private Users & Timestamps ---
+  // FIXED: Corrected variable naming here from 'instId' to 'instrumentId'
   if (isPrivate) {
     logger.log('WARN', `[handleUpdate/${userId}] Removing private user from updates.`);
-    for (const instrumentId of instrumentsToProcess) {
+    for (const instrumentId of instrumentsToProcess) {
+      await batchManager.deleteFromTimestampBatch(userId, userType, instrumentId);
+    }
     const blockCountsRef = db.doc(config.FIRESTORE_DOC_SPECULATOR_BLOCK_COUNTS);
     for (const instrumentId of instrumentsToProcess) {
       const incrementField = `counts.${instrumentId}_${Math.floor(userId/1e6)*1e6}`;
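The hunk above shows the shape of the 'instId' to 'instrumentId' bug called out in the file header: a loop variable declared under one name but referenced under another. A minimal standalone sketch of that failure mode (illustrative only, not package code):

// In module scope, referencing the undeclared name throws on the first
// iteration rather than silently writing to the wrong key.
const instrumentsToProcess = [101, 102];
try {
  for (const instId of instrumentsToProcess) {
    console.log(instrumentId); // ReferenceError: instrumentId is not defined
  }
} catch (e) {
  console.log(e.message);
}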
@@ -186,7 +190,11 @@ async function handleUpdate(task, taskId, { logger, headerManager, proxyManager,
   }
 
   // If not private, update all timestamps
-
+  // FIXED: Corrected variable naming here from 'instId' to 'instrumentId'
+  for (const instrumentId of instrumentsToProcess) {
+    await batchManager.updateUserTimestamp(userId, userType, instrumentId);
+  }
+
   if (userType === 'speculator') { await batchManager.addSpeculatorTimestampFix(userId, String(Math.floor(userId/1e6)*1e6)); }
 
   logger.log('INFO', `[handleUpdate/${userId}] Update task finished successfully.`);
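Note that addSpeculatorTimestampFix receives String(Math.floor(userId/1e6)*1e6), the same million-bucket block id used for the counts field in the previous hunk. A standalone sketch of that arithmetic (illustrative only):

// Rounds a numeric user id down to the nearest million: ids 0..999999
// map to "0", 1000000..1999999 map to "1000000", and so on.
const speculatorBlockId = (userId) => String(Math.floor(userId / 1e6) * 1e6);
console.log(speculatorBlockId(4271553)); // "4000000"
console.log(speculatorBlockId(999999));  // "0"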
@@ -1,15 +1,10 @@
-/**
- @fileoverview Utility class to manage stateful Firestore write batches.
-
+/** @fileoverview Utility class to manage stateful Firestore write batches.
  REFACTORED: Renamed 'firestore' to 'db' for consistency.
-
  OPTIMIZED: Added logic to handle speculator timestamp fixes within the batch.
-
  --- MODIFIED: Added username map caching and trading history batching. ---
-
- ---
-
- --- FIXED: Implemented deterministic sharding (part_N) to prevent document fragmentation. --- **/
+ --- FIXED: Implemented Modulo Sharding to pack sparse IDs into dense documents. ---
+ --- FIXED: Removed aggressive auto-flush timeout to prevent cost explosion. ---
+ **/
 
 const { FieldValue } = require('@google-cloud/firestore');
 
@@ -16,38 +11,40 @@
-class FirestoreBatchManager {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    logger.log('INFO', 'FirestoreBatchManager initialized.');
-  }
-
-  /* * NEW: Checks if a user's history has been fetched in the last 10 minutes.
-   * If not, it logs them as fetched and returns false (to trigger a fetch).
-   * @param {string} userId
-   * @returns {boolean} True if already fetched, false if not.
-   */
-  checkAndSetHistoryFetched(userId) {
-    // Check if the cache is stale
-    if (Date.now() - this.historyCacheTimestamp > this.HISTORY_CACHE_TTL_MS) {
-      this.logger.log('INFO', '[BATCH] History fetch cache (10m TTL) expired. Clearing set.');
-      this.historyFetchedUserIds.clear();
+class FirestoreBatchManager {
+  constructor(db, headerManager, logger, config) {
+    this.db = db;
+    this.headerManager = headerManager;
+    this.logger = logger;
+    this.config = config;
+    this.portfolioBatch = {};
+    this.timestampBatch = {};
+    this.tradingHistoryBatch = {};
+    this.speculatorTimestampFixBatch = {};
+
+    // Username map cache
+    this.usernameMap = new Map();
+    this.usernameMapUpdates = {};
+    this.usernameMapLastLoaded = 0;
+
+    // History fetch cache (NEW)
+    this.historyFetchedUserIds = new Set();
     this.historyCacheTimestamp = Date.now();
+    this.HISTORY_CACHE_TTL_MS = config.HISTORY_CACHE_TTL_MS || 600000;
+
+    this.processedSpeculatorCids = new Set();
+    this.usernameMapCollectionName = config.FIRESTORE_COLLECTION_USERNAME_MAP;
+    this.normalHistoryCollectionName = config.FIRESTORE_COLLECTION_NORMAL_HISTORY;
+    this.speculatorHistoryCollectionName = config.FIRESTORE_COLLECTION_SPECULATOR_HISTORY;
+    this.batchTimeout = null;
+    logger.log('INFO', 'FirestoreBatchManager initialized.');
   }
 
-
-
+  checkAndSetHistoryFetched(userId) {
+    if (Date.now() - this.historyCacheTimestamp > this.HISTORY_CACHE_TTL_MS) {
+      this.logger.log('INFO', '[BATCH] History fetch cache (10m TTL) expired. Clearing set.');
+      this.historyFetchedUserIds.clear();
+      this.historyCacheTimestamp = Date.now();
+    }
+    if (this.historyFetchedUserIds.has(userId)) { return true; }
+    this.historyFetchedUserIds.add(userId);
+    return false;
   }
 
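checkAndSetHistoryFetched is a check-and-set guard over an in-memory Set with a coarse TTL: one timestamp covers the whole set, so expiry clears every entry at once rather than ageing entries individually. A standalone sketch of the same pattern (illustrative only, not package code):

// Returns true when the key was seen inside the TTL window (skip the
// fetch), false otherwise (do the fetch and remember the key).
class TtlSetGuard {
  constructor(ttlMs) { this.ttlMs = ttlMs; this.seen = new Set(); this.stamp = Date.now(); }
  checkAndSet(key) {
    if (Date.now() - this.stamp > this.ttlMs) { this.seen.clear(); this.stamp = Date.now(); }
    if (this.seen.has(key)) return true;
    this.seen.add(key);
    return false;
  }
}
const guard = new TtlSetGuard(600000); // 10 minutes, the default above
console.log(guard.checkAndSet('user-1')); // false -> fetch history
console.log(guard.checkAndSet('user-1')); // true  -> skip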
@@ -54,15 +51,23 @@
-
-
-
-
-
-
-
-
+  _getUsernameShardId(cid) { return `cid_map_shard_${Math.floor(parseInt(cid) / 10000) % 10}`; }
+
+  // --- CRITICAL FIX: Removed aggressive timeout flush ---
+  // With sequential processing, the timer was firing too often, causing 1 write per user (expensive).
+  // Now we only flush if we hit the memory limit (MAX_BATCH_SIZE) or when explicitly called at the end.
+  _scheduleFlush() {
+    const maxBatch = this.config.TASK_ENGINE_MAX_BATCH_SIZE ? Number(this.config.TASK_ENGINE_MAX_BATCH_SIZE) : 400;
+    const totalOps = this._estimateBatchSize();
+    if (totalOps >= maxBatch) {
+      this.flushBatches();
+      return;
+    }
+  }
 
-
-
-
-
-
+  _estimateBatchSize() {
+    let ops = 0;
+    ops += Object.keys(this.portfolioBatch).length;
+    ops += Object.keys(this.tradingHistoryBatch).length;
+    ops += Object.keys(this.timestampBatch).length;
+    ops += Object.keys(this.speculatorTimestampFixBatch).length;
+    return ops;
+  }
 
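The flush policy is now purely size-triggered: _estimateBatchSize counts one pending write per staged document path, and _scheduleFlush commits only once that count reaches the ceiling (default 400), with flushBatches also called explicitly at the end of a run. A standalone sketch of the pattern (illustrative only, not package code):

function makeSizeTriggeredFlusher(maxOps, flush) {
  const pending = [];
  return {
    add(op) {
      pending.push(op);
      if (pending.length >= maxOps) flush(pending.splice(0)); // ceiling hit; no timer involved
    },
    end() { if (pending.length) flush(pending.splice(0)); },  // explicit final flush
  };
}
const flusher = makeSizeTriggeredFlusher(400, ops => console.log(`flushed ${ops.length} ops`));
for (let i = 0; i < 1000; i++) flusher.add(i); // logs "flushed 400 ops" twice
flusher.end();                                 // flushes the remaining 200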
@@ -69,34 +74,24 @@
-
-
-
-
-
-
-
-
+  async loadUsernameMap() {
+    if (Date.now() - this.usernameMapLastLoaded < 3600000) return;
+    this.usernameMap.clear();
+    this.logger.log('INFO', '[BATCH] Refreshing username map from Firestore...');
+    try {
+      const snapshot = await this.db.collection(this.usernameMapCollectionName).get();
+      snapshot.forEach(doc => { const data = doc.data(); for (const cid in data) if (data[cid]?.username) this.usernameMap.set(String(cid), data[cid].username); });
+      this.usernameMapLastLoaded = Date.now();
+      this.logger.log('INFO', `[BATCH] Loaded ${this.usernameMap.size} usernames.`);
+    } catch (e) { this.logger.log('ERROR', '[BATCH] Failed to load username map.', { errorMessage: e.message }); }
+  }
 
-
-    if (Date.now() - this.usernameMapLastLoaded < 3600000) return;
-    this.usernameMap.clear();
-    this.logger.log('INFO', '[BATCH] Refreshing username map from Firestore...');
-    try {
-      const snapshot = await this.db.collection(this.usernameMapCollectionName).get();
-      snapshot.forEach(doc => { const data = doc.data(); for (const cid in data) if (data[cid]?.username) this.usernameMap.set(String(cid), data[cid].username); });
-      this.usernameMapLastLoaded = Date.now();
-      this.logger.log('INFO', `[BATCH] Loaded ${this.usernameMap.size} usernames.`);
-    } catch (e) { this.logger.log('ERROR', '[BATCH] Failed to load username map.', { errorMessage: e.message }); }
-  }
+  getUsername(cid) { return this.usernameMap.get(String(cid)); }
 
-
-
-
-
-
-
-
-
-
-    this.logger.log('TRACE', `[BATCH] Queued username update for ${cidStr} in ${shardId}.`);
-    this._scheduleFlush();
-  }
+  addUsernameMapUpdate(cid, username) {
+    if (!username) return;
+    const cidStr = String(cid);
+    this.usernameMap.set(cidStr, username);
+    const shardId = this._getUsernameShardId(cidStr);
+    if (!this.usernameMapUpdates[shardId]) { this.usernameMapUpdates[shardId] = {}; }
+    this.usernameMapUpdates[shardId][cidStr] = { username };
+    this._scheduleFlush();
+  }
 
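Worked through, _getUsernameShardId keeps each run of 10,000 consecutive cids in one document and cycles through ten fixed shard documents overall:

// The shard function from the hunk above, shown with sample inputs.
const getUsernameShardId = (cid) => `cid_map_shard_${Math.floor(parseInt(cid) / 10000) % 10}`;
console.log(getUsernameShardId('12345'));  // "cid_map_shard_1"
console.log(getUsernameShardId('99999'));  // "cid_map_shard_9"
console.log(getUsernameShardId('100000')); // "cid_map_shard_0" (wraps around)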
@@ -103,42 +98,41 @@
-  async addToTradingHistoryBatch(userId, blockId, date, historyData, userType) {
-
-
-
-
-
-  }
+  async addToTradingHistoryBatch(userId, blockId, date, historyData, userType) {
+    const collection = userType === 'speculator' ? this.speculatorHistoryCollectionName : this.normalHistoryCollectionName;
+    const path = `${collection}/${blockId}/snapshots/${date}`;
+    this.tradingHistoryBatch[path] ??= {};
+    this.tradingHistoryBatch[path][userId] = historyData;
+    this._scheduleFlush();
+  }
 
-  async addToPortfolioBatch(userId, blockId, date, portfolioData, userType) {
-
-
-
-
-
-  }
+  async addToPortfolioBatch(userId, blockId, date, portfolioData, userType) {
+    const collection = userType === 'speculator' ? this.config.FIRESTORE_COLLECTION_SPECULATOR_PORTFOLIOS : this.config.FIRESTORE_COLLECTION_NORMAL_PORTFOLIOS;
+    const path = `${collection}/${blockId}/snapshots/${date}`;
+    this.portfolioBatch[path] ??= {};
+    this.portfolioBatch[path][userId] = portfolioData;
+    this._scheduleFlush();
+  }
 
-  async updateUserTimestamp(userId, userType, instrumentId = null) {
-
-
-
-
-
-
-  }
+  async updateUserTimestamp(userId, userType, instrumentId = null) {
+    const collection = userType === 'speculator' ? this.config.FIRESTORE_COLLECTION_SPECULATOR_PORTFOLIOS : this.config.FIRESTORE_COLLECTION_NORMAL_PORTFOLIOS;
+    const docPath = `${collection}/${userType === 'speculator' ? 'speculators' : 'normal'}`;
+    this.timestampBatch[docPath] ??= {};
+    const key = userType === 'speculator' ? `${userId}_${instrumentId}` : userId;
+    this.timestampBatch[docPath][key] = new Date();
+    this._scheduleFlush();
+  }
 
-  deleteFromTimestampBatch(userId, userType, instrumentId) {
-
-
-
-  }
+  deleteFromTimestampBatch(userId, userType, instrumentId) {
+    const collection = userType === 'speculator' ? this.config.FIRESTORE_COLLECTION_SPECULATOR_PORTFOLIOS : this.config.FIRESTORE_COLLECTION_NORMAL_PORTFOLIOS;
+    const docPath = `${collection}/${userType === 'speculator' ? 'speculators' : 'normal'}`;
+    if (this.timestampBatch[docPath]) { const key = userType === 'speculator' ? `${userId}_${instrumentId}` : userId; delete this.timestampBatch[docPath][key]; }
+  }
 
-  addProcessedSpeculatorCids(cids) { cids.forEach(cid => this.processedSpeculatorCids.add(cid)); }
+  addProcessedSpeculatorCids(cids) { cids.forEach(cid => this.processedSpeculatorCids.add(cid)); }
 
-  async addSpeculatorTimestampFix(userId, orchestratorBlockId) {
-
-
-
-
-
-
-  }
+  async addSpeculatorTimestampFix(userId, orchestratorBlockId) {
+    const docPath = `${this.config.FIRESTORE_COLLECTION_SPECULATOR_BLOCKS}/${orchestratorBlockId}`;
+    this.speculatorTimestampFixBatch[docPath] ??= {};
+    this.speculatorTimestampFixBatch[docPath][`users.${userId}.lastVerified`] = new Date();
+    this.speculatorTimestampFixBatch[docPath][`users.${userId}.lastHeldSpeculatorAsset`] = new Date();
+    this._scheduleFlush();
+  }
 
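Each add* method stages writes into a plain object keyed by a document path of the form <collection>/<blockId>/snapshots/<date>, with speculator timestamp keys built as `userId_instrumentId`. A standalone sketch of that staging shape (the collection name here is a placeholder, not the package's configured value):

const tradingHistoryBatch = {};
function stage(batch, collection, blockId, date, userId, data) {
  const path = `${collection}/${blockId}/snapshots/${date}`;
  batch[path] ??= {};           // same nullish-assignment idiom as above
  batch[path][userId] = data;
}
stage(tradingHistoryBatch, 'history_normal', '4000000', '2024-01-01', '4271553', { pnl: 1.2 });
console.log(tradingHistoryBatch);
// { 'history_normal/4000000/snapshots/2024-01-01': { '4271553': { pnl: 1.2 } } }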
@@ -145,61 +139,61 @@
-  /**
-
-
-
-
-
-  _flushDataBatch(batchData, firestoreBatch, logName) {
-
-
-
-
-
-
-
-    const
-
-
-
-
-    const
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+  /**
+   * --- REFACTORED: Modulo Sharding ---
+   * Fixes the issue where sparse IDs created fragmented documents.
+   * We now calculate the number of shards needed to hold the target population
+   * and bucket users into them using modulo arithmetic.
+   */
+  _flushDataBatch(batchData, firestoreBatch, logName) {
+    let count = 0;
+
+    // 1. Determine Shard Strategy
+    // If we expect ~1500 users in a block and want 200 users per shard:
+    // We need ceil(1500 / 200) = 8 shards total (part_0 to part_7).
+    // Any ID, no matter how random, will map to one of these 8 buckets.
+    const TARGET_USERS = this.config.DISCOVERY_ORCHESTRATOR_TARGET_USERS_PER_BLOCK ? Number(this.config.DISCOVERY_ORCHESTRATOR_TARGET_USERS_PER_BLOCK) : 1500;
+    const SHARD_CAPACITY = this.config.TASK_ENGINE_MAX_USERS_PER_SHARD ? Number(this.config.TASK_ENGINE_MAX_USERS_PER_SHARD) : 200;
+
+    // Ensure at least 1 shard exists
+    const TOTAL_SHARDS = Math.max(1, Math.ceil(TARGET_USERS / SHARD_CAPACITY));
+
+    for (const basePath in batchData) {
+      const users = batchData[basePath];
+      const userIds = Object.keys(users);
+      if (!userIds.length) continue;
+
+      const updatesByShard = {};
+
+      for (const userId of userIds) {
+        const cid = parseInt(userId, 10);
+        let shardId;
+
+        if (!isNaN(cid)) {
+          // --- MODULO SHARDING ---
+          // Even if IDs are 10, 1000000, 500... they will round-robin into
+          // the fixed set of shards (e.g. 8 shards), ensuring density.
+          const shardIndex = cid % TOTAL_SHARDS;
+          shardId = `part_${shardIndex}`;
+        } else {
+          shardId = 'part_misc';
+        }
+
+        if (!updatesByShard[shardId]) {
+          updatesByShard[shardId] = {};
+        }
+        updatesByShard[shardId][userId] = users[userId];
       }
 
-
-        updatesByShard[shardId]
+      for (const shardId in updatesByShard) {
+        const chunkData = updatesByShard[shardId];
+        const docRef = this.db.collection(`${basePath}/parts`).doc(shardId);
+        // merge: true ensures we append to the doc if it was started in a previous batch
+        firestoreBatch.set(docRef, chunkData, { merge: true });
+        count++;
       }
-        updatesByShard[shardId][userId] = users[userId];
-      }
 
-
-      for (const shardId in updatesByShard) {
-        const chunkData = updatesByShard[shardId];
-        const docRef = this.db.collection(`${basePath}/parts`).doc(shardId);
+      this.logger.log('INFO', `[BATCH] Staged ${userIds.length} ${logName} users into ${Object.keys(updatesByShard).length} buckets (Modulo ${TOTAL_SHARDS}) for ${basePath}.`);
 
-
-        firestoreBatch.set(docRef, chunkData, { merge: true });
-        count++;
+      delete batchData[basePath];
     }
-
-      this.logger.log('INFO', `[BATCH] Staged ${userIds.length} ${logName} users into ${Object.keys(updatesByShard).length} deterministic shards for ${basePath}.`);
-
-      delete batchData[basePath];
+    return count;
   }
-    return count;
-  }
 
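With the defaults above (1500 target users per block, 200 users per shard), every id maps into one of exactly eight documents, which is what packs sparse ids densely where the old deterministic part_N scheme fragmented them. A standalone sketch (illustrative only):

const TARGET_USERS = 1500;
const SHARD_CAPACITY = 200;
const TOTAL_SHARDS = Math.max(1, Math.ceil(TARGET_USERS / SHARD_CAPACITY)); // 8
const shardFor = (userId) => {
  const cid = parseInt(userId, 10);
  return isNaN(cid) ? 'part_misc' : `part_${cid % TOTAL_SHARDS}`;
};
// The sparse ids from the comment above all land in the same dense set of eight docs:
console.log(shardFor('10'));      // "part_2"
console.log(shardFor('500'));     // "part_4"
console.log(shardFor('1000000')); // "part_0"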
@@ -206,46 +200,46 @@
-  async flushBatches() {
-
-
-
-
-
-
-
-
-
-
-
-
-
+  async flushBatches() {
+    if (this.batchTimeout) { clearTimeout(this.batchTimeout); this.batchTimeout = null; }
+    const firestoreBatch = this.db.batch();
+    let batchOps = 0;
+    batchOps += this._flushDataBatch(this.portfolioBatch, firestoreBatch, 'Portfolio');
+    batchOps += this._flushDataBatch(this.tradingHistoryBatch, firestoreBatch, 'Trade History');
+    for (const docPath in this.timestampBatch) {
+      const timestamps = this.timestampBatch[docPath];
+      if (!Object.keys(timestamps).length) continue;
+      const docRef = this.db.collection(docPath.split('/')[0]).doc('timestamps').collection('users').doc('normal');
+      firestoreBatch.set(docRef, { users: timestamps }, { merge: true });
+      batchOps++;
+      delete this.timestampBatch[docPath];
+    }
 
-
-
-
-
-
-
-
+    for (const docPath in this.speculatorTimestampFixBatch) {
+      const updates = this.speculatorTimestampFixBatch[docPath];
+      if (!Object.keys(updates).length) continue;
+      firestoreBatch.set(this.db.doc(docPath), updates, { merge: true });
+      batchOps++;
+      delete this.speculatorTimestampFixBatch[docPath];
+    }
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  }
+    for (const shardId in this.usernameMapUpdates) {
+      const updates = this.usernameMapUpdates[shardId];
+      if (updates && Object.keys(updates).length > 0) { firestoreBatch.set( this.db.collection(this.usernameMapCollectionName).doc(shardId), updates, { merge: true } ); batchOps++; this.logger.log('INFO', `[BATCH] Flushing ${Object.keys(updates).length} username updates to ${shardId}.`); } }
+    this.usernameMapUpdates = {};
+
+    if (this.processedSpeculatorCids.size) {
+      const cids = Array.from(this.processedSpeculatorCids);
+      this.processedSpeculatorCids.clear();
+      const snapshot = await this.db.collection(this.config.PENDING_SPECULATORS_COLLECTION).get();
+      snapshot.forEach(doc => { const docData = doc.data().users || {}; const cidsInDoc = cids.filter(cid => docData[cid]); if (!cidsInDoc.length) return;
+        const delBatch = this.db.batch();
+        const updates = Object.fromEntries(cidsInDoc.map(cid => [`users.${cid}`, FieldValue.delete()]));
+        delBatch.update(doc.ref, updates);
+        delBatch.commit();
+        this.logger.log('INFO', `[BATCH] Deleted ${cidsInDoc.length} CIDs from ${doc.id}`); }); }
+
+    if (batchOps) await firestoreBatch.commit();
+    await this.headerManager.flushPerformanceUpdates();
+    this.logger.log('INFO', '[BATCH] All batches flushed successfully.');
+  }
 }
 
-module.exports = { FirestoreBatchManager };
+module.exports = { FirestoreBatchManager };
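Taken together, the intended call pattern appears to be: stage writes per user as tasks complete, let the size ceiling trigger intermediate commits, and call flushBatches once at the end. A hedged usage sketch (the task loop and its field names are hypothetical, not from the package):

async function runUpdateTasks(batchManager, tasks) {
  for (const task of tasks) {
    for (const instrumentId of task.instruments) {
      await batchManager.updateUserTimestamp(task.userId, task.userType, instrumentId);
    }
  }
  await batchManager.flushBatches(); // one explicit flush commits whatever is still staged
}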