bulltrackers-module 1.0.610 → 1.0.612
- package/functions/api-v2/helpers/data-fetchers/firestore.js +45 -0
- package/functions/api-v2/routes/popular_investors.js +5 -0
- package/functions/api-v2/routes/sync.js +42 -4
- package/functions/computation-system/persistence/ResultCommitter.js +62 -42
- package/functions/computation-system/utils/utils.js +1 -0
- package/package.json +1 -1
package/functions/api-v2/helpers/data-fetchers/firestore.js

@@ -2212,6 +2212,50 @@ const checkDataStatus = async (db, userId) => {
   };
 };
 
+/**
+ * Check Popular Investor data status - finds the latest computation date for a PI
+ * Similar to checkDataStatus but specifically for PopularInvestorProfileMetrics
+ */
+const checkPopularInvestorDataStatus = async (db, piId) => {
+  const lookbackDays = 7;
+  const today = new Date();
+  let computationDate = null;
+
+  const computationName = 'PopularInvestorProfileMetrics';
+
+  // Check for computation results in the last 7 days
+  for (let i = 0; i < lookbackDays; i++) {
+    const checkDate = new Date(today);
+    checkDate.setDate(checkDate.getDate() - i);
+    const dateStr = checkDate.toISOString().split('T')[0];
+
+    try {
+      const pageRef = db.collection('unified_insights')
+        .doc(dateStr)
+        .collection('results')
+        .doc('popular-investor')
+        .collection('computations')
+        .doc(computationName)
+        .collection('pages')
+        .doc(String(piId));
+
+      const pageSnap = await pageRef.get();
+      if (pageSnap.exists) {
+        computationDate = dateStr;
+        break;
+      }
+    } catch (error) {
+      // Continue checking other dates
+      console.error(`Error checking computation ${computationName} for ${dateStr}:`, error);
+    }
+  }
+
+  return {
+    computationDate: computationDate,
+    available: computationDate !== null
+  };
+};
+
 const sendTestAlert = async (db, userId, payload) => {
   // Create a fake notification
   const id = `test_${Date.now()}`;
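
For orientation, a minimal sketch of how a caller might use the new helper. The firebase-admin bootstrap and the sample piId below are illustrative, not part of the package; the require path matches how sync.js imports this module.

const admin = require('firebase-admin');
// Path as seen from api-v2/routes/*.js; adjust to the caller's location.
const { checkPopularInvestorDataStatus } = require('../helpers/data-fetchers/firestore.js');

admin.initializeApp();
const db = admin.firestore();

(async () => {
  // Resolves to { computationDate, available }; computationDate stays null
  // when no PopularInvestorProfileMetrics page exists in the 7-day lookback.
  const status = await checkPopularInvestorDataStatus(db, '12345');
  if (status.available) {
    console.log(`Latest computation: ${status.computationDate}`);
  }
})();
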
@@ -2539,6 +2583,7 @@ module.exports = {
   getSyncStatus,
   autoGenerateWatchlist,
   checkDataStatus,
+  checkPopularInvestorDataStatus,
   sendTestAlert,
   isSignedInUser,
   getUserUsername,
package/functions/api-v2/routes/popular_investors.js

@@ -147,11 +147,16 @@ router.get('/:piId/profile', async (req, res) => {
     const computationName = 'PopularInvestorProfileMetrics';
     const profileData = await pageCollection(db, targetDate, computationName, piId, parseInt(lookback));
 
+    // Extract computationDate from the latest data entry (first item in array, sorted by date descending)
+    // The profileData array contains { date, data } objects, sorted with latest first
+    const computationDate = profileData && profileData.length > 0 ? profileData[0].date : null;
+
     res.json({
       success: true,
       computation: computationName,
       piId: piId,
       data: profileData,
+      computationDate: computationDate, // Latest computation date for up-to-date validation
       profileType: 'public' // Indicates this is a public PI profile
     });
   } catch (error) {
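
With this change, a successful response from the profile route surfaces the date of the newest entry in data, so clients can validate freshness without inspecting the array themselves. An illustrative payload (all values made up, inner metrics elided):

{
  "success": true,
  "computation": "PopularInvestorProfileMetrics",
  "piId": "12345",
  "data": [ { "date": "2024-06-01", "data": { ... } } ],
  "computationDate": "2024-06-01",
  "profileType": "public"
}
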
package/functions/api-v2/routes/sync.js

@@ -6,7 +6,9 @@ const {
   isDeveloper,
   isSignedInUser,
   fetchPopularInvestorMasterList,
-  getUserUsername
+  getUserUsername,
+  checkDataStatus,
+  checkPopularInvestorDataStatus
 } = require('../helpers/data-fetchers/firestore.js');
 
 const router = express.Router();
@@ -25,20 +27,56 @@ const handleSyncRequest = async (req, res) => {
   const limit = await checkSyncRateLimits(db, targetId, req.targetUserId, isDev);
   if (!limit.allowed) return res.status(429).json({ error: limit.message });
 
-  // 2. Detect User Types
+  // 2. Detect User Types (needed for validation)
   const [isSignedIn, isPI] = await Promise.all([
     isSignedInUser(db, targetId),
     fetchPopularInvestorMasterList(db, String(targetId)).then(() => true).catch(() => false)
   ]);
 
+  // 3. Check if data is already up-to-date (prevent unnecessary syncs)
+  // Developers can bypass this check
+  if (!isDev) {
+    try {
+      let computationDate = null;
+
+      // Check Popular Investor data status if target is a PI (anyone can sync a PI, so always check)
+      if (isPI) {
+        const piDataStatus = await checkPopularInvestorDataStatus(db, String(targetId));
+        computationDate = piDataStatus.computationDate;
+      }
+
+      // Check signed-in user data status if target is a signed-in user syncing themselves
+      // (Only check if not already found from PI check, and only if syncing themselves)
+      if (isSignedIn && targetId === req.targetUserId && !computationDate) {
+        const userDataStatus = await checkDataStatus(db, String(targetId));
+        computationDate = userDataStatus.computationDate;
+      }
+
+      // If we found a computation date and it's today, block the sync
+      if (computationDate) {
+        const today = new Date().toISOString().split('T')[0]; // YYYY-MM-DD format
+        if (computationDate === today) {
+          return res.status(400).json({
+            error: "Data is already up-to-date for today. Sync is not needed.",
+            code: "DATA_UP_TO_DATE",
+            computationDate: computationDate
+          });
+        }
+      }
+    } catch (error) {
+      // If data status check fails, log but don't block sync (fail open for reliability)
+      console.warn(`[handleSyncRequest] Failed to check data status for ${targetId}:`, error.message);
+    }
+  }
+
   if (!isSignedIn && !isPI) {
     return res.status(404).json({ error: "User not found in SignedInUsers or Popular Investor master list" });
   }
 
-  //
+  // 4. Get username
   const username = await getUserUsername(db, targetId) || String(targetId);
 
-  //
+  // 5. Create request IDs and dispatch tasks
   const requestIds = [];
   const tasks = [];
 
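
Clients that trigger syncs now have a distinct, non-retryable outcome to handle. A hedged client-side sketch; the endpoint URL and requestSync name are assumptions (the route path is not shown in this hunk), while the 400/429 status codes and the DATA_UP_TO_DATE body shape come from the diff above:

// Hypothetical endpoint; substitute the real sync route.
async function requestSync(baseUrl, targetId) {
  const res = await fetch(`${baseUrl}/sync/${targetId}`, { method: 'POST' });
  if (res.status === 400) {
    const body = await res.json();
    if (body.code === 'DATA_UP_TO_DATE') {
      // Already computed today; body.computationDate is today's YYYY-MM-DD.
      return { synced: false, computationDate: body.computationDate };
    }
  }
  if (res.status === 429) {
    // Blocked by checkSyncRateLimits; retry later.
    return { synced: false, rateLimited: true };
  }
  return { synced: res.ok };
}
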
package/functions/computation-system/persistence/ResultCommitter.js

@@ -15,6 +15,7 @@ const ContractValidator = require('./ContractValidator');
 const validationOverrides = require('../config/validation_overrides');
 const pLimit = require('p-limit');
 const zlib = require('zlib');
+const { commitBatchInChunks, generateDataHash, FieldValue } = require('../utils/utils');
 
 const NON_RETRYABLE_ERRORS = [ 'PERMISSION_DENIED', 'DATA_LOSS', 'FAILED_PRECONDITION' ];
 const SIMHASH_REGISTRY_COLLECTION = 'system_simhash_registry';
@@ -137,6 +138,8 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
       continue;
     }
 
+    // [NEW] Page Computation Logic (Fan-Out) with TTL
+    // Bypasses standard compression/sharding to write per-user documents
     // [NEW] Page Computation Logic (Fan-Out) with TTL
     // Bypasses standard compression/sharding to write per-user documents
     if (isPageComputation && !isEmpty) {
@@ -145,26 +148,27 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
         .collection(config.computationsSubcollection).doc(name);
 
       // --- CLEANUP START: Remove old storage formats (Sharded/Compressed) ---
-      //
-
-
-
-
-
-
-
-
-
-
-
-
-
+      // Optimization: Only attempt cleanup on the initial write to save reads
+      if (isInitialWrite) {
+        try {
+          const docSnap = await mainDocRef.get();
+          if (docSnap.exists) {
+            const dData = docSnap.data();
+            if (dData._sharded) {
+              const shardCol = mainDocRef.collection('_shards');
+              const shardDocs = await withRetry(() => shardCol.listDocuments());
+
+              if (shardDocs.length > 0) {
+                const cleanupOps = shardDocs.map(d => ({ type: 'DELETE', ref: d }));
+                await commitBatchInChunks(config, deps, cleanupOps, `${name}::PageModeCleanup`);
+                runMetrics.io.deletes += cleanupOps.length;
+                logger.log('INFO', `[PageMode] ${name}: Cleaned up ${cleanupOps.length} old shard documents.`);
+              }
             }
           }
+        } catch (cleanupErr) {
+          logger.log('WARN', `[PageMode] ${name}: Cleanup warning: ${cleanupErr.message}`);
         }
-      } catch (cleanupErr) {
-        logger.log('WARN', `[PageMode] ${name}: Cleanup warning: ${cleanupErr.message}`);
       }
       // --- CLEANUP END ---
 
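
The cleanup matters because a results document can now exist in three layouts: legacy sharded (_sharded flag with a _shards subcollection), page mode (_isPageMode flag with per-user documents under pages), and the standard single document. A hedged reader-side sketch of branching on those flags; the function name and return shapes are assumptions, while the flags and subcollection names appear in this diff:

async function readComputation(mainDocRef, userId) {
  const snap = await mainDocRef.get();
  if (!snap.exists) return null;
  const header = snap.data();

  if (header._isPageMode) {
    // Fan-out layout: one document per user under 'pages'.
    const page = await mainDocRef.collection('pages').doc(String(userId)).get();
    return page.exists ? page.data() : null;
  }
  if (header._sharded) {
    // Legacy layout: result split across '_shards' (the cleanup above removes these).
    const shards = await mainDocRef.collection('_shards').get();
    return shards.docs.map(d => d.data());
  }
  return header; // Standard single-document result.
}
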
@@ -198,31 +202,47 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
         logger.log('INFO', `[PageMode] ${name}: Wrote ${pageWrites.length} user pages. TTL: ${ttlDays}d.`);
       }
 
-      // 3. Write the "Header" document
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+      // 3. Write or Update the "Header" document
+      // FIXED: Now runs on every batch to ensure counts are accumulated correctly.
+
+      const isFinalFlush = (flushMode !== 'INTERMEDIATE');
+
+      // Determine Page Count Value: Raw number for initial, Increment for updates
+      let pageCountValue = pageWrites.length;
+      if (!isInitialWrite) {
+        pageCountValue = FieldValue.increment(pageWrites.length);
+      }
+
+      const headerData = {
+        _isPageMode: true, // Flag for readers to know where to look
+        _pageCount: pageCountValue,
+        _lastUpdated: new Date().toISOString(),
+        _expireAt: expireAt // Ensure the header also gets deleted
+      };
+
+      // Handle Completion Status
+      if (isFinalFlush) {
+        headerData._completed = true;
+      } else if (isInitialWrite) {
+        headerData._completed = false; // Initialize as incomplete
+      }
+
+      // Write Strategy:
+      // isInitialWrite = TRUE  -> merge: false (Wipes old Standard Mode data/schema)
+      // isInitialWrite = FALSE -> merge: true  (Updates count and status, preserves data)
+      await mainDocRef.set(headerData, { merge: !isInitialWrite });
+
+      runMetrics.io.writes += 1;
+
+      if (isFinalFlush && calc.manifest.hash) {
+        successUpdates[name] = {
+          hash: calc.manifest.hash,
+          simHash: simHash,
+          resultHash: resultHash,
+          category: calc.manifest.category,
+          composition: calc.manifest.composition,
+          metrics: runMetrics
+        };
       }
 
       continue; // Skip the standard writeSingleResult logic