bulltrackers-module 1.0.752 → 1.0.754
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -16,6 +16,9 @@ const QUEUE_NAME = process.env.ORCHESTRATOR_QUEUE || 'task-engine-queue';
 const LOCATION = process.env.GCP_REGION || 'europe-west1';
 const PROJECT = process.env.GCP_PROJECT_ID;
 
+// --- FEATURE FLAG: Disable Normal/Speculator Users ---
+const ENABLE_LEGACY_USERS = process.env.ENABLE_LEGACY_USERS === 'true';
+
 /**
  * ENTRY POINT: HTTP Handler for Workflow Interaction
  */
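Note the strict parse: only the literal string 'true' enables legacy users, so an unset variable leaves them disabled. Because the flag is read once at module load, flipping it means updating the function's environment, for example (function name and region taken from the test script added later in this diff; the command itself is illustrative, not part of the package):

    # Turn legacy (normal/speculator) processing off without a code change
    gcloud functions deploy orchestrator-http \
      --region=europe-west1 \
      --update-env-vars ENABLE_LEGACY_USERS=false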
@@ -33,6 +36,14 @@ async function handleOrchestratorHttp(req, res, dependencies, config) {
       throw new Error("Missing userType or date for PLAN action");
     }
 
+    // --- NEW: Block Legacy Users if Disabled ---
+    if ((userType === 'normal' || userType === 'speculator') && !ENABLE_LEGACY_USERS) {
+      const msg = `[Orchestrator] SKIPPING PLAN for '${userType}': ENABLE_LEGACY_USERS is false.`;
+      logger.log('WARN', msg);
+      // Return 200 to prevent retry loops in workflows
+      return res.status(200).send({ status: 'skipped', message: msg });
+    }
+
     // Determine self-URL for callback (Cloud Task needs to call this function back)
     // We use the env var passed by GCF (FUNCTION_URI) or construct it manually
     const orchestratorUrl = orchestratorUrlOverride ||
@@ -47,6 +58,14 @@ async function handleOrchestratorHttp(req, res, dependencies, config) {
     if (!planId || !windowId) {
       throw new Error("Missing planId or windowId for EXECUTE_WINDOW action");
     }
+
+    // --- NEW: Block Legacy Users if Disabled (Double Check) ---
+    if ((userType === 'normal' || userType === 'speculator') && !ENABLE_LEGACY_USERS) {
+      const msg = `[Orchestrator] SKIPPING EXECUTE_WINDOW for '${userType}': ENABLE_LEGACY_USERS is false.`;
+      logger.log('WARN', msg);
+      return res.status(200).send({ status: 'skipped', message: msg });
+    }
+
     const result = await executeUpdateWindow(planId, windowId, userType, config, dependencies);
     res.status(200).send(result);
 
@@ -227,8 +246,13 @@ async function runDiscoveryOrchestrator(config, deps) {
   const { logger, firestoreUtils } = deps;
   logger.log('INFO', '🚀 Discovery Orchestrator triggered...');
   await firestoreUtils.resetProxyLocks(deps, config);
-
-  if (
+
+  if (ENABLE_LEGACY_USERS) {
+    if (isUserTypeEnabled('normal', config.enabledUserTypes)) await runDiscovery('normal', config.discoveryConfig.normal, config, deps);
+    if (isUserTypeEnabled('speculator', config.enabledUserTypes)) await runDiscovery('speculator', config.discoveryConfig.speculator, config, deps);
+  } else {
+    logger.log('INFO', 'Discovery skipped for legacy users (normal/speculator) because ENABLE_LEGACY_USERS is false.');
+  }
 }
 
 async function runUpdateOrchestrator(config, deps) {
@@ -237,8 +261,13 @@ async function runUpdateOrchestrator(config, deps) {
   await firestoreUtils.resetProxyLocks(deps, config);
   const enabledTypes = config.enabledUserTypes || [];
 
-  if (
-
+  if (ENABLE_LEGACY_USERS) {
+    if (isUserTypeEnabled('normal', enabledTypes)) await runUpdates('normal', config.updateConfig, config, deps);
+    if (isUserTypeEnabled('speculator', enabledTypes)) await runUpdates('speculator', config.updateConfig, config, deps);
+  } else {
+    logger.log('INFO', 'Updates skipped for legacy users (normal/speculator) because ENABLE_LEGACY_USERS is false.');
+  }
+
   if (isUserTypeEnabled('popular_investor', enabledTypes)) {
     const piConfig = { ...config.updateConfig, popularInvestorRankingsCollection: config.updateConfig.popularInvestorRankingsCollection || 'popular_investor_rankings' };
     await runUpdates('popular_investor', piConfig, config, deps);
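Both orchestrators keep gating on isUserTypeEnabled before the new flag is consulted. That helper is not part of this diff; a minimal sketch of the membership test the call sites above imply (an assumption, not the package's actual code):

    // Hypothetical reconstruction of the helper used above: the call sites
    // only need a membership check against the configured user-type list.
    function isUserTypeEnabled(userType, enabledUserTypes) {
      return Array.isArray(enabledUserTypes) && enabledUserTypes.includes(userType);
    }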
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+# ==============================================================================
+# BULLTRACKERS TASK ENGINE END-TO-END TESTER
+# This script triggers the Orchestrator to plan an immediate execution window.
+# ==============================================================================
+
+# --- CONFIGURATION ---
+FUNCTION_NAME="orchestrator-http"
+REGION="europe-west1"
+DATE=$(date +%Y-%m-%d)        # Defaults to today
+USER_TYPE="popular_investor"  # Options: normal, speculator, popular_investor
+WINDOWS=1                     # 1 window = immediate execution (0s delay)
+
+# --- 1. FETCH URL DYNAMICALLY ---
+echo "🔍 Fetching URL for function: $FUNCTION_NAME ($REGION)..."
+
+# Try Gen 2 (Cloud Run) URL first
+URL=$(gcloud functions describe $FUNCTION_NAME --region=$REGION --format='value(serviceConfig.uri)' 2>/dev/null)
+
+# Fallback to Gen 1 if empty
+if [ -z "$URL" ]; then
+  URL=$(gcloud functions describe $FUNCTION_NAME --region=$REGION --format='value(httpsTrigger.url)' 2>/dev/null)
+fi
+
+if [ -z "$URL" ]; then
+  echo "❌ Error: Could not find URL for function '$FUNCTION_NAME'. Check if it is deployed."
+  exit 1
+fi
+
+echo "✅ Target URL: $URL"
+
+# --- 2. GET AUTH TOKEN ---
+echo "🔑 Generating Identity Token..."
+TOKEN=$(gcloud auth print-identity-token)
+
+if [ -z "$TOKEN" ]; then
+  echo "❌ Error: Could not generate token. Run 'gcloud auth login' first."
+  exit 1
+fi
+
+# --- 3. SEND REQUEST ---
+echo "🚀 Triggering Plan for $USER_TYPE on $DATE ($WINDOWS window)..."
+
+RESPONSE=$(curl -s -w "\n%{http_code}" -X POST "$URL" \
+  -H "Authorization: Bearer $TOKEN" \
+  -H "Content-Type: application/json" \
+  -d "{
+    \"action\": \"PLAN\",
+    \"userType\": \"$USER_TYPE\",
+    \"date\": \"$DATE\",
+    \"windows\": $WINDOWS
+  }")
+
+# --- 4. PARSE RESPONSE ---
+HTTP_BODY=$(echo "$RESPONSE" | head -n -1)
+HTTP_CODE=$(echo "$RESPONSE" | tail -n 1)
+
+if [ "$HTTP_CODE" -eq 200 ]; then
+  echo ""
+  echo "✅ SUCCESS (HTTP 200)"
+  echo "---------------------------------------------------"
+  echo "$HTTP_BODY" | python3 -m json.tool 2>/dev/null || echo "$HTTP_BODY"
+  echo "---------------------------------------------------"
+  echo "👉 Monitor 'task-engine-queue' in Cloud Tasks Console."
+  echo "👉 Check Logs Explorer for 'Orchestrator' and 'Dispatcher'."
+else
+  echo ""
+  echo "❌ FAILED (HTTP $HTTP_CODE)"
+  echo "---------------------------------------------------"
+  echo "$HTTP_BODY"
+  echo "---------------------------------------------------"
+fi
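One portability caveat in the parsing step: head -n -1 (print everything but the last line) is a GNU coreutils extension and errors out on macOS/BSD head. A drop-in replacement for the body/status split that works on both:

    # sed '$d' drops the final line (the status code) on GNU and BSD alike
    HTTP_BODY=$(echo "$RESPONSE" | sed '$d')
    HTTP_CODE=$(echo "$RESPONSE" | tail -n 1)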
@@ -1,26 +1,17 @@
 /**
  * @fileoverview BigQuery Batch Manager for Task Engine
- *
- *
- * Integrates with FirestoreBatchManager to flush together.
- *
- * UPDATED: Now uses insertRowsWithMerge to prevent duplicate rows
- * when a user is processed twice on the same day.
- *
- * Deduplication keys:
- * - portfolio_snapshots: ['date', 'user_id', 'user_type']
- * - trade_history_snapshots: ['date', 'user_id', 'user_type']
- * - social_post_snapshots: ['date', 'user_id', 'user_type']
+ * FIXED: Switched to APPEND-ONLY (insertRows) to avoid DML quotas.
+ * PREVIOUSLY: Used MERGE, which hit the 1,500 DML/day limit.
  */
 
 const {
   ensurePortfolioSnapshotsTable,
   ensureTradeHistorySnapshotsTable,
   ensureSocialPostSnapshotsTable,
-  insertRowsWithMerge
+  insertRows // <--- CHANGED: Using direct insert instead of merge
 } = require('../../core/utils/bigquery_utils');
 
-// Deduplication keys for
+// Deduplication keys (Kept for reference, though not used in Append mode)
 const PORTFOLIO_DEDUP_KEYS = ['date', 'user_id', 'user_type'];
 const HISTORY_DEDUP_KEYS = ['date', 'user_id', 'user_type'];
 const SOCIAL_DEDUP_KEYS = ['date', 'user_id', 'user_type'];
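Dropping MERGE means a user processed twice in one day now produces two rows, so deduplication moves to read time using the keys kept above. A sketch of a latest-row-wins read (the dataset name and the ingested_at ordering column are assumptions; substitute whatever timestamp the rows actually carry):

    const { BigQuery } = require('@google-cloud/bigquery');

    // Keep one row per (date, user_id, user_type); later writes win.
    const query = `
      SELECT * EXCEPT(rn) FROM (
        SELECT *, ROW_NUMBER() OVER (
          PARTITION BY date, user_id, user_type
          ORDER BY ingested_at DESC
        ) AS rn
        FROM \`my_dataset.portfolio_snapshots\`
      ) WHERE rn = 1`;

    async function readDedupedSnapshots() {
      const [rows] = await new BigQuery().query({ query });
      return rows;
    }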
@@ -43,9 +34,6 @@ class BigQueryBatchManager {
     };
   }
 
-  /**
-   * Add portfolio row to buffer
-   */
   async addPortfolioRow(row) {
     if (!this.tablesEnsured.portfolio) {
       await ensurePortfolioSnapshotsTable(this.logger);
@@ -54,9 +42,6 @@ class BigQueryBatchManager {
     this.portfolioBuffer.push(row);
   }
 
-  /**
-   * Add trade history row to buffer
-   */
   async addHistoryRow(row) {
     if (!this.tablesEnsured.history) {
       await ensureTradeHistorySnapshotsTable(this.logger);
@@ -65,9 +50,6 @@ class BigQueryBatchManager {
     this.historyBuffer.push(row);
   }
 
-  /**
-   * Add social post row to buffer
-   */
   async addSocialRow(row) {
     if (!this.tablesEnsured.social) {
       await ensureSocialPostSnapshotsTable(this.logger);
@@ -77,55 +59,47 @@ class BigQueryBatchManager {
   }
 
   /**
-   * Flush a buffer to BigQuery using
-   *
+   * Flush a buffer to BigQuery using APPEND (Load Job)
+   * FIXED: Removed MERGE logic to bypass DML quotas.
    */
-  async _flushBuffer(buffer, tableId, tableName, dedupKeys) {
+  async _flushBuffer(buffer, tableId, tableName) {
     if (buffer.length === 0) return 0;
 
-    const rows = [...buffer];
-    buffer.length = 0;
+    const rows = [...buffer];
+    buffer.length = 0;
 
     try {
-      //
-      // This
-      await insertRowsWithMerge(
+      // CHANGED: insertRows uses a LOAD Job with WRITE_APPEND.
+      // This is FREE and has a 100,000 jobs/day limit.
+      await insertRows(
         this.datasetId,
         tableId,
         rows,
-        dedupKeys,
         this.logger
       );
 
       if (this.logger) {
-        this.logger.log('INFO', `[BigQueryBatch] ✅
+        this.logger.log('INFO', `[BigQueryBatch] ✅ Appended ${rows.length} ${tableName} rows to BigQuery (Load Job)`);
       }
 
       return rows.length;
     } catch (error) {
-      // Log error but don't throw - allow Firestore writes to continue
       if (this.logger) {
         this.logger.log('WARN', `[BigQueryBatch] Failed to flush ${tableName} to BigQuery: ${error.message}`);
       }
-      // Put rows back in buffer for retry
+      // Put rows back in buffer for retry
       buffer.push(...rows);
       return 0;
     }
   }
 
-  /**
-   * Flush all buffers to BigQuery
-   * Called by FirestoreBatchManager.flushBatches()
-   */
   async flushBatches() {
-    if (process.env.BIGQUERY_ENABLED === 'false') {
-      return; // Skip if BigQuery disabled
-    }
+    if (process.env.BIGQUERY_ENABLED === 'false') return;
 
     const results = await Promise.allSettled([
-      this._flushBuffer(this.portfolioBuffer, 'portfolio_snapshots', 'portfolio', PORTFOLIO_DEDUP_KEYS),
-      this._flushBuffer(this.historyBuffer, 'trade_history_snapshots', 'history', HISTORY_DEDUP_KEYS),
-      this._flushBuffer(this.socialBuffer, 'social_post_snapshots', 'social', SOCIAL_DEDUP_KEYS),
+      this._flushBuffer(this.portfolioBuffer, 'portfolio_snapshots', 'portfolio'),
+      this._flushBuffer(this.historyBuffer, 'trade_history_snapshots', 'history'),
+      this._flushBuffer(this.socialBuffer, 'social_post_snapshots', 'social')
     ]);
 
     const totalFlushed = results
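core/utils/bigquery_utils itself is not in this diff, so the exact shape of insertRows is unknown. A minimal sketch of an append-only implementation consistent with the load-job comment above, using the official @google-cloud/bigquery client (the NDJSON temp-file approach and logger handling are assumptions):

    const { BigQuery } = require('@google-cloud/bigquery');
    const fs = require('fs');
    const os = require('os');
    const path = require('path');

    const bigquery = new BigQuery();

    async function insertRows(datasetId, tableId, rows, logger) {
      // Load jobs consume newline-delimited JSON from a file.
      const tmpFile = path.join(os.tmpdir(), `bq_${tableId}_${Date.now()}.ndjson`);
      fs.writeFileSync(tmpFile, rows.map((r) => JSON.stringify(r)).join('\n'));
      try {
        // WRITE_APPEND load jobs are not billed like DML statements and have
        // a much higher daily quota, which is the point of this change.
        await bigquery.dataset(datasetId).table(tableId).load(tmpFile, {
          sourceFormat: 'NEWLINE_DELIMITED_JSON',
          writeDisposition: 'WRITE_APPEND',
        });
        if (logger) logger.log('INFO', `[BQ] Loaded ${rows.length} rows into ${tableId}`);
      } finally {
        fs.unlinkSync(tmpFile); // Always clean up the temp file.
      }
    }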
@@ -133,13 +107,10 @@ class BigQueryBatchManager {
       .reduce((sum, r) => sum + r.value, 0);
 
     if (totalFlushed > 0 && this.logger) {
-      this.logger.log('INFO', `[BigQueryBatch] Flushed ${totalFlushed} total rows
+      this.logger.log('INFO', `[BigQueryBatch] Flushed ${totalFlushed} total rows.`);
     }
   }
 
-  /**
-   * Get buffer sizes (for monitoring)
-   */
   getBufferSizes() {
     return {
       portfolio: this.portfolioBuffer.length,
@@ -149,4 +120,4 @@ class BigQueryBatchManager {
   }
 }
 
-module.exports = { BigQueryBatchManager };
+module.exports = { BigQueryBatchManager };
@@ -1,7 +1,5 @@
 /** @fileoverview Utility class to manage stateful Firestore write batches.
- * REFACTORED:
- * Mapped new user types to their respective collections.
- * IMPLEMENTS: Round-Robin Sharding for maximum user density per document.
+ * REFACTORED: Increased default batch size to 500 to reduce API calls.
  */
 
 const { FieldValue } = require('@google-cloud/firestore');
@@ -73,14 +71,16 @@ class FirestoreBatchManager {
     return `cid_map_shard_${Math.floor(parseInt(cid) / 10000) % 10}`;
   }
 
-  async _scheduleFlush() {
-    //
-
+  async _scheduleFlush() {
+    // OPTIMIZATION: Increased default to 500.
+    // Firestore limit is 500 writes. Since we shard users into buckets,
+    // 500 users results in much fewer than 500 writes (likely <10 writes).
+    const maxBatch = this.config.TASK_ENGINE_MAX_BATCH_SIZE ? Number(this.config.TASK_ENGINE_MAX_BATCH_SIZE) : 500;
     const totalOps = this._estimateBatchSize();
 
     if (totalOps >= maxBatch) {
       this.logger.log('INFO', `[BATCH] Hit limit (${totalOps} >= ${maxBatch}). Flushing...`);
-      await this.flushBatches();
+      await this.flushBatches();
       return;
     }
   }
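The 500 default can still be tuned per deployment via the TASK_ENGINE_MAX_BATCH_SIZE config key read above; how the config object reaches the manager is not shown in this diff, so the wiring below is hypothetical:

    // Hypothetical wiring: lower the flush threshold for a deployment that
    // shares its Firestore write budget with other processes.
    const batchManager = new FirestoreBatchManager({
      config: { TASK_ENGINE_MAX_BATCH_SIZE: '250' }, // strings are fine; Number() coerces
      logger,
    });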
@@ -135,7 +135,7 @@ class FirestoreBatchManager {
       this.usernameMapUpdates[shardId] = {};
     }
     this.usernameMapUpdates[shardId][cidStr] = { username };
-    this._scheduleFlush();
+    this._scheduleFlush();
   }
 
   /**