bulltrackers-module 1.0.556 → 1.0.557
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/functions/computation-system/workflows/data_feeder_pipeline.yaml +12 -125
- package/functions/computation-system/workflows/morning_prep_pipeline.yaml +55 -0
- package/functions/orchestrator/index.js +181 -1
- package/functions/task-engine/taskengine_workflow.yaml +83 -0
- package/index.js +9 -1
- package/package.json +1 -1
- package/functions/computation-system/scripts/force_run.js +0 -72
package/functions/computation-system/workflows/data_feeder_pipeline.yaml
CHANGED
@@ -1,5 +1,6 @@
-# Data Feeder Pipeline (
-#
+# Data Feeder Pipeline (V4.0 - Lean Market Close)
+# Schedule: 22:00 UTC
+# Objective: Capture Market Close data & perform Global Indexing at midnight.
 
 main:
   params: [input]
@@ -10,7 +11,7 @@ main:
           - location: "europe-west1"
 
     # ==========================================
-    # TEST MODE ROUTING
+    # TEST MODE ROUTING (Preserved for Debugging)
    # ==========================================
     - check_test_mode:
         switch:
@@ -18,9 +19,6 @@ main:
             steps:
               - route_test:
                   switch:
-                    # --- ISOLATED FUNCTION TESTS (Run & Stop) ---
-                    # Use these to test a single function without triggering waits or next steps.
-
                     - condition: '${input.target_step == "test_price"}'
                       steps:
                         - call_price_iso:
@@ -42,29 +40,7 @@ main:
                             timeout: 300
                       - return_insights:
                           return: "Test Complete: Insights Fetcher executed."
-
-                    - condition: '${input.target_step == "test_rankings"}'
-                      steps:
-                        - call_rankings_iso:
-                            call: http.post
-                            args:
-                              url: '${"https://" + location + "-" + project + ".cloudfunctions.net/fetch-popular-investors"}'
-                              auth: { type: OIDC }
-                              timeout: 300
-                        - return_rankings:
-                            return: "Test Complete: Popular Investors executed."
-
-                    - condition: '${input.target_step == "test_social"}'
-                      steps:
-                        - call_social_iso:
-                            call: http.post
-                            args:
-                              url: '${"https://" + location + "-" + project + ".cloudfunctions.net/social-orchestrator"}'
-                              auth: { type: OIDC }
-                              timeout: 300
-                        - return_social:
-                            return: "Test Complete: Social Orchestrator executed."
-
+
                     - condition: '${input.target_step == "test_indexer"}'
                       steps:
                         - call_indexer_iso:
@@ -76,18 +52,11 @@ main:
                       - return_indexer:
                           return: "Test Complete: Root Data Indexer executed."
 
-                    # --- PHASE JUMPS (Resumes flow from that point) ---
-                    - condition: '${input.target_step == "market"}'
-                      next: phase_2200_price
-                    - condition: '${input.target_step == "midnight"}'
-                      next: phase_0000_rankings
-                    - condition: '${input.target_step == "social"}'
-                      next: social_loop_start
-
     # ==========================================
     # PHASE 1: MARKET CLOSE (Starts 22:00 UTC)
     # ==========================================
 
+    # 1. Price Fetcher
     - phase_2200_price:
         try:
           call: http.post
@@ -101,12 +70,8 @@ main:
             - log_price_error:
                 call: sys.log
                 args: { severity: "WARNING", text: "Price fetch timed out/failed. Proceeding." }
-
-    # FIXED: Only one 10-minute wait here now.
-    - wait_10_after_price:
-        call: sys.sleep
-        args: { seconds: 600 }
 
+    # 2. Insights Fetcher (Can run immediately after or parallel)
     - phase_2200_insights:
         try:
           call: http.post
@@ -121,108 +86,30 @@ main:
                 call: sys.log
                 args: { severity: "WARNING", text: "Insights fetch timed out/failed. Proceeding." }
 
-    - wait_10_after_insights:
-        call: sys.sleep
-        args: { seconds: 600 }
-
     # ==========================================
-    # PHASE 2: WAIT FOR MIDNIGHT
+    # PHASE 2: WAIT FOR MIDNIGHT (Indexing)
     # ==========================================
 
     - align_to_midnight:
         assign:
           - now_sec: '${int(sys.now())}'
           - day_sec: 86400
+          # Calculates seconds remaining until the next 00:00 UTC
           - sleep_midnight: '${day_sec - (now_sec % day_sec)}'
+
     - wait_for_midnight:
         call: sys.sleep
         args: { seconds: '${sleep_midnight}' }
 
     # ==========================================
-    # PHASE 3:
+    # PHASE 3: GLOBAL INDEXING (00:00 UTC)
     # ==========================================
 
-    - phase_0000_rankings:
-        try:
-          call: http.post
-          args:
-            url: '${"https://" + location + "-" + project + ".cloudfunctions.net/fetch-popular-investors"}'
-            auth: { type: OIDC }
-            timeout: 300
-        except:
-          as: e
-          steps:
-            - log_ranking_error:
-                call: sys.log
-                args: { severity: "WARNING", text: "Rankings failed. Proceeding to Social (risky)." }
-
-    - wait_10_after_rankings:
-        call: sys.sleep
-        args: { seconds: 600 }
-
-    - phase_0000_social:
-        try:
-          call: http.post
-          args:
-            url: '${"https://" + location + "-" + project + ".cloudfunctions.net/social-orchestrator"}'
-            auth: { type: OIDC }
-            timeout: 300
-        except:
-          as: e
-          steps:
-            - log_social_error:
-                call: sys.log
-                args: { severity: "WARNING", text: "Social failed. Proceeding." }
-
-    - wait_10_after_social:
-        call: sys.sleep
-        args: { seconds: 600 }
-
     - global_index_midnight:
         call: http.post
         args:
           url: '${"https://" + location + "-" + project + ".cloudfunctions.net/root-data-indexer"}'
          auth: { type: OIDC }
 
-    # ==========================================
-    # PHASE 4: SOCIAL LOOP (Every 3 Hours)
-    # ==========================================
-
-    - init_social_loop:
-        assign:
-          - i: 0
-
-    - social_loop_start:
-        switch:
-          - condition: ${i < 7} # Covers the remainder of the 24h cycle
-            steps:
-              - wait_3_hours:
-                  call: sys.sleep
-                  args: { seconds: 10800 }
-
-              - run_social_recurring:
-                  try:
-                    call: http.post
-                    args:
-                      url: '${"https://" + location + "-" + project + ".cloudfunctions.net/social-orchestrator"}'
-                      auth: { type: OIDC }
-                      timeout: 300
-                  except:
-                    as: e
-                    steps:
-                      - log_loop_social_warn:
-                          call: sys.log
-                          args: { severity: "WARNING", text: "Loop Social timed out. Proceeding." }
-
-              - wait_10_in_loop:
-                  call: sys.sleep
-                  args: { seconds: 600 }
-
-              - increment_loop:
-                  assign:
-                    - i: '${i + 1}'
-              - next_iteration:
-                  next: social_loop_start
-
     - finish:
-        return: "
+        return: "Daily Close Cycle Finished (Price, Insights, Indexing)."
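The align_to_midnight step above relies on the fact that Unix time counts seconds from 00:00 UTC, so now_sec % day_sec is the number of seconds elapsed since the last UTC midnight. A minimal Node.js sketch of the same arithmetic (the example values in the comments are illustrative):

    // Same arithmetic as the workflow's align_to_midnight step.
    const daySec = 86400;                              // day_sec in the workflow
    const nowSec = Math.floor(Date.now() / 1000);      // now_sec: current Unix time in seconds
    const sleepMidnight = daySec - (nowSec % daySec);  // seconds until the next 00:00 UTC
    console.log(sleepMidnight);                        // e.g. 5400 when it is 22:30 UTC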
package/functions/computation-system/workflows/morning_prep_pipeline.yaml
ADDED
@@ -0,0 +1,55 @@
+# Morning Prep Pipeline
+# Schedule: 04:00 UTC
+# Objective: Refresh Rankings -> Trigger Daily Update (Task Engine)
+
+main:
+  params: [input]
+  steps:
+    - init:
+        assign:
+          - project: '${sys.get_env("GOOGLE_CLOUD_PROJECT_ID")}'
+          - location: "europe-west1"
+          # Define the Task Engine workflow ID to trigger later
+          - task_engine_workflow_id: "daily-update-pipeline"
+
+    # ==========================================
+    # STEP 1: REFRESH RANKINGS (The Master List)
+    # ==========================================
+    - fetch_rankings:
+        try:
+          call: http.post
+          args:
+            url: '${"https://" + location + "-" + project + ".cloudfunctions.net/fetch-popular-investors"}'
+            auth: { type: OIDC }
+            timeout: 540
+        except:
+          as: e
+          steps:
+            - log_rankings_fail:
+                call: sys.log
+                args: { severity: "ERROR", text: "Rankings Fetch Failed. Stopping pipeline to prevent stale Task Engine run." }
+            - raise_error:
+                raise: ${e}
+
+    # ==========================================
+    # STEP 2: TRIGGER TASK ENGINE
+    # ==========================================
+    # We trigger the workflow execution directly.
+    # This replaces the need for a separate 05:00 UTC Scheduler.
+
+    - trigger_daily_update:
+        call: googleapis.workflowexecutions.v1.projects.locations.workflows.executions.create
+        args:
+          parent: '${"projects/" + project + "/locations/" + location + "/workflows/" + task_engine_workflow_id}'
+          body:
+            # Pass explicit arguments to the Task Engine
+            argument: '{"userTypes": ["normal", "speculator"], "source": "morning_prep_trigger"}'
+        result: execution_result
+
+    - log_trigger:
+        call: sys.log
+        args:
+          text: '${"✅ Rankings Complete. Triggered Task Engine: " + execution_result.name}'
+
+    - finish:
+        return: "Morning Prep Complete. Task Engine launched."
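The trigger_daily_update step launches the Task Engine workflow through the Workflow Executions API, passing its input as a JSON string. For reference, a roughly equivalent manual trigger from Node.js using the @google-cloud/workflows client could look like the sketch below; the project, location, and source values are illustrative assumptions, not taken from the package.

    // Sketch: manually creating the same kind of execution the workflow creates.
    const { ExecutionsClient } = require('@google-cloud/workflows');

    async function triggerDailyUpdate() {
      const client = new ExecutionsClient();
      const [execution] = await client.createExecution({
        // projects/{project}/locations/{location}/workflows/{workflow}
        parent: client.workflowPath('my-project', 'europe-west1', 'daily-update-pipeline'),
        execution: {
          // Becomes the `input` param of the target workflow's main()
          argument: JSON.stringify({ userTypes: ['normal', 'speculator'], source: 'manual_trigger' }),
        },
      });
      console.log('Started execution:', execution.name);
    }

    triggerDailyUpdate().catch(console.error);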
package/functions/orchestrator/index.js
CHANGED
@@ -2,10 +2,183 @@
  * @fileoverview Main orchestration logic.
  * REFACTORED: This file now contains the main pipe functions
  * that are called by the Cloud Function entry points.
+ * It includes the new HTTP handlers for Workflow-driven "Slow-Trickle" updates.
  * They receive all dependencies.
  */
 const { checkDiscoveryNeed, getDiscoveryCandidates, dispatchDiscovery } = require('./helpers/discovery_helpers');
 const { getUpdateTargets, dispatchUpdates } = require('./helpers/update_helpers');
+const { FieldValue } = require('@google-cloud/firestore');
+
+/**
+ * ENTRY POINT: HTTP Handler for Workflow Interaction
+ * Map this function to your HTTP Trigger in your index.js/exports.
+ * This handles the "PLAN" and "EXECUTE_WINDOW" phases of the slow-trickle update.
+ * * @param {object} req - Express request object.
+ * @param {object} res - Express response object.
+ * @param {object} dependencies - Contains logger, db, firestoreUtils, pubsubUtils.
+ * @param {object} config - Global configuration.
+ */
+async function handleOrchestratorHttp(req, res, dependencies, config) {
+  const { logger } = dependencies;
+  const body = req.body || {};
+  const { action, userType, date, windows, planId, windowId } = body;
+
+  logger.log('INFO', `[Orchestrator HTTP] Received request: ${action}`, body);
+
+  try {
+    if (action === 'PLAN') {
+      // PHASE 1: Find users and split them into Firestore documents
+      if (!userType || !date) {
+        throw new Error("Missing userType or date for PLAN action");
+      }
+      const result = await planDailyUpdates(userType, date, windows || 10, config, dependencies);
+      res.status(200).send(result);
+
+    } else if (action === 'EXECUTE_WINDOW') {
+      // PHASE 2: Load specific window and dispatch
+      if (!planId || !windowId) {
+        throw new Error("Missing planId or windowId for EXECUTE_WINDOW action");
+      }
+      const result = await executeUpdateWindow(planId, windowId, userType, config, dependencies);
+      res.status(200).send(result);
+
+    } else if (action === 'LEGACY_RUN') {
+      // Support for triggering the old brute-force method via HTTP if needed
+      await runUpdateOrchestrator(config, dependencies);
+      res.status(200).send({ status: 'Completed legacy run' });
+
+    } else {
+      res.status(400).send({ error: `Unknown action: ${action}` });
+    }
+  } catch (error) {
+    logger.log('ERROR', `[Orchestrator HTTP] Fatal error in ${action}`, { errorMessage: error.message, stack: error.stack });
+    res.status(500).send({ error: error.message, stack: error.stack });
+  }
+}
+
+/**
+ * LOGIC: Plan the updates (Split into windows)
+ * 1. Fetches all users needing updates.
+ * 2. Shuffles them.
+ * 3. Splits them into 'n' windows.
+ * 4. Saves the windows to Firestore.
+ */
+async function planDailyUpdates(userType, date, numberOfWindows, config, deps) {
+  const { logger, db } = deps;
+
+  // 1. Get ALL targets
+  // We construct thresholds to capture everyone due for today
+  const now = new Date();
+  const startOfTodayUTC = new Date(Date.UTC(now.getUTCFullYear(), now.getUTCMonth(), now.getUTCDate()));
+  const DaysAgoUTC = new Date(Date.now() - 30 * 24 * 60 * 60 * 1000);
+
+  const thresholds = {
+    dateThreshold: startOfTodayUTC,
+    gracePeriodThreshold: DaysAgoUTC
+  };
+
+  logger.log('INFO', `[Orchestrator Plan] Calculating targets for ${userType}...`);
+
+  // Reusing existing helper to get the raw list of users
+  const targets = await getUpdateTargets(userType, thresholds, config.updateConfig, deps);
+  logger.log('INFO', `[Orchestrator Plan] Found ${targets.length} candidates for ${userType}.`);
+
+  if (targets.length === 0) {
+    return { planId: null, totalUsers: 0, windowCount: 0, windowIds: [] };
+  }
+
+  // 2. Shuffle to randomize load (Fisher-Yates shuffle)
+  // This ensures that we don't always update the same users at the same time of day
+  for (let i = targets.length - 1; i > 0; i--) {
+    const j = Math.floor(Math.random() * (i + 1));
+    [targets[i], targets[j]] = [targets[j], targets[i]];
+  }
+
+  // 3. Split and Save
+  const chunkSize = Math.ceil(targets.length / numberOfWindows);
+  const planId = `plan_${userType}_${date}`;
+  const windowIds = [];
+
+  const batchWriter = db.batch();
+  let writeCount = 0;
+
+  for (let i = 0; i < numberOfWindows; i++) {
+    const start = i * chunkSize;
+    const end = start + chunkSize;
+    const chunk = targets.slice(start, end);
+
+    if (chunk.length > 0) {
+      // Store ONLY the necessary IDs/Data in Firestore
+      // Path: system_update_plans/{planId}/windows/{windowId}
+      const windowDocRef = db.collection('system_update_plans').doc(planId).collection('windows').doc(String(i + 1));
+
+      batchWriter.set(windowDocRef, {
+        userType: userType,
+        users: chunk, // This array contains the user objects/IDs
+        status: 'pending',
+        windowId: i + 1,
+        userCount: chunk.length,
+        createdAt: FieldValue.serverTimestamp(),
+        scheduledForDate: date
+      });
+
+      windowIds.push(i + 1);
+      writeCount++;
+    }
+  }
+
+  await batchWriter.commit();
+  logger.log('SUCCESS', `[Orchestrator Plan] Plan Saved: ${planId} with ${writeCount} windows containing ${targets.length} users.`);
+
+  return {
+    planId: planId,
+    totalUsers: targets.length,
+    windowCount: windowIds.length,
+    windowIds: windowIds
+  };
+}
+
+/**
+ * LOGIC: Execute a specific window
+ * 1. Reads the user list from Firestore.
+ * 2. Calls dispatchUpdates to send them to the Task Engine.
+ */
+async function executeUpdateWindow(planId, windowId, userType, config, deps) {
+  const { logger, db } = deps;
+
+  // 1. Fetch the window from Firestore
+  const windowRef = db.collection('system_update_plans').doc(planId).collection('windows').doc(String(windowId));
+  const windowDoc = await windowRef.get();
+
+  if (!windowDoc.exists) {
+    throw new Error(`Window ${windowId} not found in plan ${planId}`);
+  }
+
+  const data = windowDoc.data();
+
+  // Idempotency check: prevent re-running a completed window
+  if (data.status === 'completed') {
+    logger.log('WARN', `[Orchestrator Execute] Window ${windowId} already completed. Skipping.`);
+    return { dispatchedCount: 0, status: 'already_completed' };
+  }
+
+  const targets = data.users;
+  logger.log('INFO', `[Orchestrator Execute] Window ${windowId}: Dispatching ${targets.length} users.`);
+
+  // 2. Dispatch using existing helper
+  // The helper handles batching for Pub/Sub and logging.
+  if (targets && targets.length > 0) {
+    await dispatchUpdates(targets, userType, config.updateConfig, deps);
+  }
+
+  // 3. Mark window as complete
+  await windowRef.update({
+    status: 'completed',
+    executedAt: FieldValue.serverTimestamp()
+  });
+
+  return { dispatchedCount: targets.length, status: 'success' };
+}
 
 /** Stage 1: Main discovery orchestrator pipe */
 async function runDiscoveryOrchestrator(config, deps) {
@@ -135,4 +308,11 @@ function isUserTypeEnabled(userType, enabledTypes) {
   return enabledTypes.includes(userType);
 }
 
-module.exports = {
+module.exports = {
+  handleOrchestratorHttp,
+  runDiscoveryOrchestrator,
+  runUpdateOrchestrator,
+  runDiscovery,
+  runUpdates,
+  isUserTypeEnabled
+};
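A note on the window splitting in planDailyUpdates above: chunkSize is the ceiling of targets per window and empty chunks are skipped, so when there are fewer targets than requested windows the plan simply contains fewer windows. That is presumably why the calling workflow iterates over plan_res.body.windowIds rather than assuming the requested count. A small standalone sketch of that behaviour:

    // Standalone sketch of the splitting rule used by planDailyUpdates.
    function splitIntoWindows(targets, numberOfWindows) {
      const chunkSize = Math.ceil(targets.length / numberOfWindows);
      const windows = [];
      for (let i = 0; i < numberOfWindows; i++) {
        const chunk = targets.slice(i * chunkSize, (i + 1) * chunkSize);
        if (chunk.length > 0) windows.push(chunk.length); // empty windows are never created
      }
      return windows;
    }

    console.log(splitIntoWindows(new Array(95).fill('u'), 10)); // [10, 10, 10, 10, 10, 10, 10, 10, 10, 5]
    console.log(splitIntoWindows(new Array(9).fill('u'), 10));  // [1, 1, 1, 1, 1, 1, 1, 1, 1] -> only 9 windows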
package/functions/task-engine/taskengine_workflow.yaml
ADDED
@@ -0,0 +1,83 @@
+# bulltrackers-module/workflows/daily_update_pipeline.yaml
+# Cloud Workflows: Slow-Trickle Daily Update Orchestrator
+# Triggers the Orchestrator to PLAN updates, then EXECUTES them in timed windows.
+
+main:
+  params: [input]
+  steps:
+    - init:
+        assign:
+          - project: '${sys.get_env("GOOGLE_CLOUD_PROJECT_ID")}'
+          - location: "europe-west1"
+          # Replace with your actual Orchestrator HTTP trigger URL
+          - orchestrator_url: '${"https://" + location + "-" + project + ".cloudfunctions.net/orchestrator-http"}'
+          - today: '${text.split(time.format(sys.now()), "T")[0]}'
+          # User types to process (can be passed in input or defaulted)
+          - user_types: '${default(map.get(input, "userTypes"), ["normal", "speculator"])}'
+          - default_windows: 10
+
+    - process_user_types_loop:
+        for:
+          value: user_type
+          in: ${user_types}
+          steps:
+            - log_start:
+                call: sys.log
+                args:
+                  text: '${"Starting update cycle for: " + user_type}'
+
+            # --- PHASE 1: PLAN ---
+            - plan_updates:
+                call: http.post
+                args:
+                  url: '${orchestrator_url}'
+                  body:
+                    action: 'PLAN'
+                    userType: '${user_type}'
+                    date: '${today}'
+                    windows: '${default_windows}'
+                  auth: { type: OIDC }
+                  timeout: 300 # 5 minutes to query and split users
+                result: plan_res
+
+            - log_plan:
+                call: sys.log
+                args:
+                  text: '${"📅 PLAN CREATED: " + user_type + " | PlanID: " + plan_res.body.planId + " | Users: " + plan_res.body.totalUsers + " | Windows: " + plan_res.body.windowCount}'
+
+            # --- PHASE 2: EXECUTE WINDOWS ---
+            - run_windows_loop:
+                for:
+                  value: window_id
+                  in: '${plan_res.body.windowIds}'
+                  steps:
+                    - execute_window:
+                        call: http.post
+                        args:
+                          url: '${orchestrator_url}'
+                          body:
+                            action: 'EXECUTE_WINDOW'
+                            planId: '${plan_res.body.planId}'
+                            windowId: '${window_id}'
+                            userType: '${user_type}'
+                          auth: { type: OIDC }
+                        result: exec_res
+
+                    - log_execution:
+                        call: sys.log
+                        args:
+                          text: '${"🚀 WINDOW EXECUTED: " + user_type + " Window " + window_id + "/" + plan_res.body.windowCount + ". Dispatched: " + exec_res.body.dispatchedCount}'
+
+                    # --- PACING: Sleep between windows ---
+                    # We skip the sleep after the very last window of the loop
+                    - check_pacing_needed:
+                        switch:
+                          - condition: '${window_id < plan_res.body.windowCount}'
+                            steps:
+                              - wait_pacing:
+                                  call: sys.sleep
+                                  args:
+                                    seconds: 3600 # 1 Hour wait between blocks
+
+    - finish:
+        return: "Daily Slow-Trickle Update Completed."
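With the defaults above (10 windows, a 3600-second pause that is skipped after the last window, and the two user types processed one after the other), the trickle spans roughly nine hours per user type, ignoring dispatch time. A quick back-of-the-envelope check:

    // Rough duration estimate for the pacing loop; dispatch time per window is ignored.
    const windows = 10;         // default_windows
    const pacingSeconds = 3600; // wait_pacing, skipped after the final window
    const userTypes = 2;        // ["normal", "speculator"], processed sequentially
    const perTypeHours = ((windows - 1) * pacingSeconds) / 3600;
    console.log(perTypeHours);             // 9 hours per user type
    console.log(perTypeHours * userTypes); // roughly 18 hours end-to-end for both types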
package/index.js
CHANGED
@@ -11,7 +11,13 @@ const { FirestoreBatchManager } = require('./functions/task-engine/utils/fire
 const firestoreUtils = require('./functions/core/utils/firestore_utils');
 
 // Orchestrator
-
+// [UPDATED] Imported handleOrchestratorHttp
+const {
+  runDiscoveryOrchestrator,
+  runUpdateOrchestrator,
+  handleOrchestratorHttp
+} = require('./functions/orchestrator/index');
+
 const { checkDiscoveryNeed, getDiscoveryCandidates, dispatchDiscovery } = require('./functions/orchestrator/helpers/discovery_helpers');
 const { getUpdateTargets, dispatchUpdates } = require('./functions/orchestrator/helpers/update_helpers');
 
@@ -72,6 +78,8 @@ const core = {
 };
 
 const orchestrator = {
+  // [UPDATED] Exported handleOrchestratorHttp so it can be mapped in Cloud Functions
+  handleOrchestratorHttp,
   runDiscoveryOrchestrator,
   runUpdateOrchestrator,
   checkDiscoveryNeed,
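The comments in this diff say handleOrchestratorHttp is exported so it can be mapped to a Cloud Function, but the mapping itself is not part of this release. A hypothetical wiring sketch with the Functions Framework is shown below; the function name 'orchestrator-http', the dependency objects, and the assumption that the package exposes an orchestrator namespace are all illustrative, not taken from the package.

    // Hypothetical entry-point wiring (not part of this package version).
    const functions = require('@google-cloud/functions-framework');
    const { Firestore } = require('@google-cloud/firestore');
    const { orchestrator } = require('bulltrackers-module'); // assumes this namespace is exported

    const dependencies = { logger: console, db: new Firestore() }; // placeholder dependencies
    const config = { updateConfig: {} };                            // placeholder configuration

    // The name matches the placeholder orchestrator-http URL used in the workflow's init step.
    functions.http('orchestrator-http', (req, res) =>
      orchestrator.handleOrchestratorHttp(req, res, dependencies, config)
    );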
package/package.json
CHANGED

package/functions/computation-system/scripts/force_run.js
DELETED
@@ -1,72 +0,0 @@
-/**
- * Force Run Control Script
- * Usage:
- * node scripts/force_run.js <ComputationName> [YYYY-MM-DD]
- * * Examples:
- * node scripts/force_run.js TestSystemProbe 2026-01-02 (Runs specific day)
- * node scripts/force_run.js TestSystemProbe (Runs ALL days from start)
- */
-
-require('dotenv').config(); // Load environment variables
-const { PubSub } = require('@google-cloud/pubsub');
-
-// CONFIGURATION
-const PROJECT_ID = process.env.GCP_PROJECT_ID || 'stocks-12345';
-const TOPIC_NAME = process.env.PUBSUB_TOPIC_DISPATCH || 'dispatch-topic'; // Must match orchestrator_config.js
-const SYSTEM_START_DATE = '2026-01-01'; // The beginning of time for your system
-
-async function main() {
-  const args = process.argv.slice(2);
-  const computationName = args[0];
-  const specificDate = args[1];
-
-  if (!computationName) {
-    console.error('❌ Error: Computation Name is required.');
-    console.log('Usage: node scripts/force_run.js <ComputationName> [YYYY-MM-DD]');
-    process.exit(1);
-  }
-
-  const pubsub = new PubSub({ projectId: PROJECT_ID });
-  const topic = pubsub.topic(TOPIC_NAME);
-
-  // 1. Logic: Single Date vs. All Dates
-  if (specificDate) {
-    await triggerComputation(topic, computationName, specificDate);
-  } else {
-    console.log(`⚠️ No date provided. Triggering REPLAY for ${computationName} from ${SYSTEM_START_DATE} to Today...`);
-
-    let currentDate = new Date(SYSTEM_START_DATE);
-    const today = new Date();
-
-    while (currentDate <= today) {
-      const dateStr = currentDate.toISOString().split('T')[0];
-      await triggerComputation(topic, computationName, dateStr);
-
-      // Move to next day
-      currentDate.setDate(currentDate.getDate() + 1);
-
-      // Small throttle to prevent flooding Pub/Sub quota if years of data
-      await new Promise(r => setTimeout(r, 100));
-    }
-  }
-}
-
-async function triggerComputation(topic, computation, date) {
-  const payload = {
-    computationName: computation,
-    date: date,
-    force: true, // Tells Orchestrator to ignore "Already Complete" status
-    source: 'manual-cli' // Audit trail
-  };
-
-  const dataBuffer = Buffer.from(JSON.stringify(payload));
-
-  try {
-    const messageId = await topic.publishMessage({ data: dataBuffer });
-    console.log(`✅ [${date}] Triggered ${computation} (Msg ID: ${messageId})`);
-  } catch (error) {
-    console.error(`❌ [${date}] Failed to trigger ${computation}:`, error.message);
-  }
-}
-
-main().catch(console.error);