bulltrackers-module 1.0.736 → 1.0.738
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/functions/computation-system-v2/config/bulltrackers.config.js +7 -3
- package/functions/computation-system-v2/docs/admin.md +91 -0
- package/functions/computation-system-v2/docs/architecture.md +59 -0
- package/functions/computation-system-v2/framework/execution/Orchestrator.js +20 -0
- package/functions/computation-system-v2/handlers/adminTest.js +327 -0
- package/functions/computation-system-v2/handlers/index.js +4 -0
- package/functions/computation-system-v2/index.js +17 -1
- package/functions/computation-system-v2/test/test-full-pipeline.js +227 -0
- package/functions/computation-system-v2/test/test-worker-pool.js +208 -436
- package/package.json +1 -1
|
@@ -0,0 +1,227 @@
|
|
|
1
|
+
/**
 * @fileoverview Full Pipeline Integration Test
 *
 * Simulates the entire flow from Dispatcher -> Orchestrator -> Worker Pool -> Storage.
 *
 * Verifies that:
 *   1. The Dispatcher BLOCKS computations with missing mandatory data (RunAnalyzer).
 *   2. Runnable computations are sent to the Worker Pool (if configured).
 *   3. Results are stored in the TEST table (not production).
 *
 * USAGE:
 *   node test/test-full-pipeline.js --date 2026-01-24
 */
|
|
11
|
+
|
|
12
|
+
// Force test mode BEFORE any requires below, so modules that read these
// flags at load time pick up the test configuration.
process.env.NODE_ENV = 'test';
process.env.WORKER_LOCAL_MODE = 'true'; // Simulate workers locally
process.env.WORKER_POOL_ENABLED = 'true';

const fs = require('fs');
const path = require('path');
const { Orchestrator } = require('../framework/execution/Orchestrator');
// TestConfigBuilder wraps the production config with test-only overrides
// (test table, test bucket, run id) — see ./run-pipeline-test.
const { TestConfigBuilder } = require('./run-pipeline-test');
const prodConfig = require('../config/bulltrackers.config');
|
|
21
|
+
|
|
22
|
+
// ============================================================================
|
|
23
|
+
// TEST RUNNER
|
|
24
|
+
// ============================================================================
|
|
25
|
+
|
|
26
|
+
// ============================================================================
// TEST RUNNER
// ============================================================================

/**
 * Runs the full pipeline integration test in three phases:
 *   1. Dispatcher analysis — verifies blocked/impossible computations are
 *      never marked runnable (exits the process with code 1 on violation).
 *   2. Execution — runs the Orchestrator with a storage interceptor that
 *      records every commit before passing it through to the test table.
 *   3. Reporting — cross-checks executed computations against the analysis
 *      and prints a storage/error summary.
 *
 * Side effects: mutates `prodConfig.computations`, replaces
 * `orchestrator.storageManager`, writes to the configured test table,
 * and may call `process.exit(1)`.
 */
async function runPipelineTest() {
  const args = parseArgs();
  console.log('\n╔════════════════════════════════════════════════════════════╗');
  console.log('║ FULL PIPELINE INTEGRATION TEST ║');
  console.log('╚════════════════════════════════════════════════════════════╝');
  console.log(`📅 Target Date: ${args.date}`);
  console.log(`🧪 Test Table: computation_results_test`);
  console.log(`👷 Worker Pool: ENABLED (Local Simulation)\n`);

  // 0. DYNAMICALLY LOAD COMPUTATIONS
  // This fixes the "Initialized with 0 computations" error.
  // Every .js module under ../computations is treated as a computation
  // definition — NOTE(review): assumes each module's export shape matches
  // what the Orchestrator expects; confirm against the computations dir.
  const computationsDir = path.join(__dirname, '../computations');
  const loadedComputations = fs.readdirSync(computationsDir)
    .filter(f => f.endsWith('.js'))
    .map(f => require(path.join(computationsDir, f)));

  // Inject into config (mutates the shared prodConfig module object).
  prodConfig.computations = loadedComputations;
  console.log(`📦 Auto-discovered ${prodConfig.computations.length} computations from /computations directory`);

  // 1. CONFIGURE TEST ENVIRONMENT
  const builder = new TestConfigBuilder(prodConfig, {
    runId: `test-${Date.now()}`,
    date: args.date,
    testBucket: 'bulltrackers-computation-staging', // <--- FIX: Use your real bucket
    batchSize: 1000,
    concurrency: 2
  });

  const testConfig = builder.build();

  // Explicitly enable worker pool in the test config
  testConfig.workerPool = {
    ...prodConfig.workerPool,
    enabled: true,
    localMode: true,
    minEntitiesForOffload: 0 // Force everything to worker pool for testing
  };

  // 2. INITIALIZE ORCHESTRATOR
  const orchestrator = new Orchestrator(testConfig, console);
  await orchestrator.initialize();

  // -------------------------------------------------------------------------
  // PHASE 1: DISPATCHER VERIFICATION (Pre-Flight Check)
  // -------------------------------------------------------------------------
  console.log('🔍 PHASE 1: DISPATCHER ANALYSIS (The Gatekeeper)');
  console.log(' Verifying that missing data BLOCKS execution...');

  const analysis = await orchestrator.analyze({ date: args.date });

  printAnalysisTable(analysis);

  // Validation: Ensure nothing "Impossible" or "Blocked" is in the runnable list
  const badRunnables = analysis.runnable.filter(r =>
    analysis.blocked.find(b => b.name === r.name) ||
    analysis.impossible.find(i => i.name === r.name)
  );

  if (badRunnables.length > 0) {
    console.error('❌ CRITICAL FAILURE: Dispatcher marked blocked tasks as runnable!');
    process.exit(1);
  }
  console.log('✅ Dispatcher logic validated. Blocked tasks will NOT run.\n');

  // -------------------------------------------------------------------------
  // PHASE 2: EXECUTION (Worker Pool & Storage)
  // -------------------------------------------------------------------------
  console.log('🚀 PHASE 2: PIPELINE EXECUTION');
  console.log(' Running only valid tasks via Worker Pool...');

  // We intercept storage to verify writes without polluting real DB (optional if using test table)
  const storageInterceptor = new TestStorageInterceptor(orchestrator.storageManager);
  orchestrator.storageManager = storageInterceptor;

  // Run!
  const result = await orchestrator.execute({
    date: args.date,
    dryRun: false // We want to test the full "write" path to the test table
  });

  // -------------------------------------------------------------------------
  // PHASE 3: VERIFICATION & REPORTING
  // -------------------------------------------------------------------------
  console.log('\n📊 PHASE 3: FINAL REPORT');

  // 1. Did Blocked Tasks Run? (cross-check execution output vs. analysis)
  const blockedRan = result.completed.filter(c =>
    analysis.blocked.find(b => b.name === c.name)
  );

  if (blockedRan.length > 0) {
    console.error(`❌ FAILURE: The following BLOCKED tasks executed anyway: ${blockedRan.map(c => c.name).join(', ')}`);
  } else {
    console.log('✅ SUCCESS: No blocked tasks were executed.');
  }

  // 2. Did Runnable Tasks Succeed?
  const runnableNames = analysis.runnable.map(r => r.name);
  const successfulRunnables = result.completed.filter(c => runnableNames.includes(c.name));

  if (successfulRunnables.length > 0) {
    console.log(`✅ SUCCESS: ${successfulRunnables.length} runnable tasks completed successfully.`);
  } else if (runnableNames.length > 0) {
    console.warn('⚠️ WARNING: Runnable tasks existed but none completed (check errors below).');
  } else {
    console.log('ℹ️ No runnable tasks found (this is expected if data is missing).');
  }

  // 3. Storage Verification — counts come from the interceptor, not the DB.
  const writes = storageInterceptor.getSummary();
  console.log(`💾 Storage: Written ${writes.totalEntities} entity results to ${testConfig.resultStore.table}`);

  if (result.summary.errors > 0) {
    console.log('\n❌ EXECUTION ERRORS:');
    result.errors.forEach(e => console.log(` - ${e.name}: ${e.error}`));
  }
}
|
|
144
|
+
|
|
145
|
+
// ============================================================================
|
|
146
|
+
// HELPERS
|
|
147
|
+
// ============================================================================
|
|
148
|
+
|
|
149
|
+
/**
 * Pretty-prints the dispatcher analysis as a colorized fixed-width table.
 *
 * @param {object} analysis - Dispatcher output. Any of the category arrays
 *   (runnable, blocked, impossible, reRuns, skipped) may be absent; missing
 *   categories are treated as empty instead of throwing a TypeError.
 */
function printAnalysisTable(analysis) {
  console.log('\n ┌──────────────────────────────┬──────────────┬──────────────────────────────────────────┐');
  console.log(' │ Computation                  │ Status       │ Reason                                   │');
  console.log(' ├──────────────────────────────┼──────────────┼──────────────────────────────────────────┤');

  // Guard each category with `?? []`: analyzer versions that omit a category
  // (e.g. no reRuns/skipped) previously crashed this reporter.
  const tagged = (rows, status) => (rows ?? []).map(r => ({ ...r, status }));
  const all = [
    ...tagged(analysis.runnable, 'RUNNABLE'),
    ...tagged(analysis.blocked, 'BLOCKED'),
    ...tagged(analysis.impossible, 'IMPOSSIBLE'),
    ...tagged(analysis.reRuns, 'RERUN'),
    ...tagged(analysis.skipped, 'SKIPPED')
  ];

  all.forEach(row => {
    // Pad then slice so long values cannot break the column alignment.
    const name = row.name.padEnd(28).slice(0, 28);
    const status = row.status.padEnd(12);
    const reason = (row.reason || 'Ready to run').padEnd(40).slice(0, 40);

    let color = '\x1b[37m'; // White (default for RERUN/SKIPPED/IMPOSSIBLE)
    if (row.status === 'BLOCKED') color = '\x1b[31m'; // Red
    if (row.status === 'RUNNABLE') color = '\x1b[32m'; // Green

    console.log(` │ ${color}${name}\x1b[0m │ ${color}${status}\x1b[0m │ ${reason} │`);
  });
  console.log(' └──────────────────────────────┴──────────────┴──────────────────────────────────────────┘\n');
}
|
|
175
|
+
|
|
176
|
+
/**
|
|
177
|
+
* Simple Storage Interceptor to verify writes
|
|
178
|
+
*/
|
|
179
|
+
/**
 * Recording wrapper around the real storage manager.
 *
 * Every commitResults call is logged into `writes` before being delegated
 * to the wrapped storage (which points at the test table). All other
 * properties and methods are transparently forwarded via a Proxy.
 */
class TestStorageInterceptor {
  constructor(realStorage) {
    this.realStorage = realStorage;
    this.writes = [];

    // Forward anything the interceptor does not define to the real storage,
    // binding functions so their `this` stays on the wrapped instance.
    const forwarding = {
      get: (target, prop) => {
        if (prop in target) {
          return target[prop];
        }
        if (typeof target.realStorage[prop] === 'function') {
          return target.realStorage[prop].bind(target.realStorage);
        }
        return target.realStorage[prop];
      }
    };
    return new Proxy(this, forwarding);
  }

  /**
   * Records the write (date, computation name, entity count) and then
   * delegates to the real storage manager.
   */
  async commitResults(date, entry, results, depHashes) {
    const record = {
      date,
      computation: entry.name,
      count: Object.keys(results).length
    };
    this.writes.push(record);
    // Pass through to real storage (which is pointing to test table)
    return this.realStorage.commitResults(date, entry, results, depHashes);
  }

  /** Aggregates the recorded writes for the final report. */
  getSummary() {
    const totalEntities = this.writes.reduce((sum, w) => sum + w.count, 0);
    return {
      totalWrites: this.writes.length,
      totalEntities,
      details: this.writes
    };
  }
}
|
|
214
|
+
|
|
215
|
+
/**
 * Parses CLI arguments for the test runner.
 *
 * Supported flags:
 *   --date <YYYY-MM-DD>  Target date for the run (default: yesterday).
 *
 * @returns {{ date: string }} Normalized date string (YYYY-MM-DD).
 * @throws {Error} If the value given to --date is missing or unparseable,
 *   instead of letting toISOString() later throw an opaque RangeError.
 */
function parseArgs() {
  const args = process.argv.slice(2);
  let date = new Date();
  date.setDate(date.getDate() - 1); // Default to yesterday

  for (let i = 0; i < args.length; i++) {
    if (args[i] === '--date') {
      date = new Date(args[++i]);
      // new Date(undefined) and new Date('garbage') both yield Invalid Date;
      // fail fast with a readable message rather than a RangeError below.
      if (Number.isNaN(date.getTime())) {
        throw new Error(`Invalid value for --date: expected YYYY-MM-DD, got "${args[i]}"`);
      }
    }
  }

  // NOTE(review): toISOString() is UTC, so the local "yesterday" default can
  // shift by a day near midnight — acceptable for a manual test runner.
  return { date: date.toISOString().slice(0, 10) };
}
|
|
226
|
+
|
|
227
|
+
// Run the test. On failure, log AND set a non-zero exit code — the previous
// `.catch(console.error)` printed the error but exited 0, so CI pipelines
// would treat a failed integration test as a pass.
runPipelineTest().catch(err => {
  console.error(err);
  process.exitCode = 1;
});
|