bulltrackers-module 1.0.736 → 1.0.737
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/functions/computation-system-v2/config/bulltrackers.config.js +7 -3
- package/functions/computation-system-v2/docs/architecture.md +59 -0
- package/functions/computation-system-v2/index.js +2 -0
- package/functions/computation-system-v2/test/test-full-pipeline.js +227 -0
- package/functions/computation-system-v2/test/test-worker-pool.js +208 -436
- package/package.json +1 -1
package/functions/computation-system-v2/config/bulltrackers.config.js

```diff
@@ -246,10 +246,12 @@ module.exports = {
   execution: {
     // Max concurrent entity processing (per-entity computations)
     // Higher = faster but more memory. Tune based on your Cloud Function memory.
-    entityConcurrency:
+    entityConcurrency: 50,
 
     // Batch size for BigQuery inserts
     insertBatchSize: 500,
+
+    fetchBatchSize: 30000,
 
     // Memory safety: max entities to load for a dependency
     // If a dependency has more entities than this, use getDependency(name, entityId) instead
@@ -361,7 +363,7 @@ module.exports = {
     // Max concurrent worker invocations
     // Higher = faster but more network/GCS load
     // Recommended: 100-200 for production
-    concurrency:
+    concurrency: 100,
 
     // Worker invocation timeout (ms)
     // Should be slightly less than worker function timeout
@@ -382,6 +384,8 @@ module.exports = {
     // Useful for testing specific computations
     forceOffloadComputations: process.env.WORKER_FORCE_COMPUTATIONS
       ? process.env.WORKER_FORCE_COMPUTATIONS.split(',')
-      : []
+      : [],
+
+    minEntitiesForOffload: 100,
   }
 };
```
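Taken together, these config changes wire up the new worker-pool batching: `fetchBatchSize` caps how many rows a single BigQuery fetch pulls, `insertBatchSize` chunks the writes, and `minEntitiesForOffload` keeps small jobs in-process. A minimal sketch of an offload decision consistent with these knobs follows; `shouldOffload` is illustrative, not code from this package:

```js
// Sketch only: one plausible reading of the workerPool knobs above.
// `shouldOffload` is hypothetical; the package's real decision logic
// lives in the Orchestrator/RemoteTaskRunner, which this diff doesn't show.
function shouldOffload(computationName, entityCount, workerPool) {
  if (!workerPool.enabled) return false;
  // WORKER_FORCE_COMPUTATIONS=Name1,Name2 forces specific computations out
  if (workerPool.forceOffloadComputations.includes(computationName)) return true;
  // Below the threshold, remote dispatch overhead outweighs the win
  return entityCount >= workerPool.minEntitiesForOffload;
}

// With minEntitiesForOffload: 100, a 42-entity job would run locally;
// a 5,000-entity job would go to the worker pool.
```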
package/functions/computation-system-v2/docs/architecture.md (new file, +59 lines)

```mermaid
graph TD
    Root((System))

    %% Subgraph: Scheduling & Control
    subgraph Control_Plane [Control Plane]
        Cron((Timer)) -->|Every Minute| Scheduler[Scheduler Handler]
        Scheduler -->|Find Due & Zombies| StateRepo[(State DB)]
        Scheduler -->|Dispatch Task| CloudTasks[Cloud Tasks Queue]
        CloudTasks -->|HTTP POST w/ Backoff| Dispatcher[Dispatcher Handler]
        Dispatcher -->|Run Computation| Orchestrator[Orchestrator]
        Orchestrator -->|Return Status| Dispatcher
        Dispatcher -.->|Blocked| Return503[503 Retry]
        Return503 -.-> CloudTasks
        Dispatcher -.->|Success / Skipped| Return200[200 OK]
    end

    %% Subgraph: Execution
    subgraph Execution_Core [Execution Core]
        Orchestrator --> Manifest[Manifest Builder]
        Orchestrator -->|Check Hashes & Deps| StateRepo
        Orchestrator -->|Fetch Data| BigQuery[(BigQuery)]
        Orchestrator --> ExecMode{Mode?}

        ExecMode -->|Global / Light| LocalExec[Local Execution]
        LocalExec --> Logic[Computation Logic]
        Logic --> LocalExec

        ExecMode -->|Per-Entity / Heavy| RemoteRunner[Remote Task Runner]
        RemoteRunner -->|Upload Context| GCS[(Cloud Storage)]
        RemoteRunner --> Worker[Worker Handler]
        Worker -->|Download Context| GCS
        Worker -->|Execute| Logic
        Worker -->|Return Result| RemoteRunner
    end

    %% Subgraph: Persistence
    subgraph Persistence [Persistence Layer]
        LocalExec -->|Commit Results| StateRepo
        RemoteRunner -->|Commit Batch| StateRepo
    end

    %% Single-root anchoring (critical)
    Root --> Cron
    Root -.-> Orchestrator
    Root -.-> LocalExec
    Root -.-> RemoteRunner

    %% Styling
    classDef plain fill:#ffffff,stroke:#333,stroke-width:1px;
    classDef db fill:#e1f5fe,stroke:#01579b,stroke-width:2px;
    classDef logic fill:#e8f5e9,stroke:#2e7d32,stroke-width:2px;
    classDef queue fill:#fff9c4,stroke:#fbc02d,stroke-width:2px;

    class Cron,Scheduler,Dispatcher,Orchestrator,Manifest,LocalExec,RemoteRunner,Worker plain;
    class StateRepo,BigQuery,GCS db;
    class Logic logic;
    class CloudTasks queue;
```
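The Control Plane edges encode a retry contract: a Blocked result maps to HTTP 503, which Cloud Tasks redelivers with backoff, while Success/Skipped map to 200 and ack the task. A minimal Express-style sketch of that contract follows; the handler shape, `orchestrator.run` interface, and the `'blocked'` status name are assumptions for illustration, not the package's actual dispatcher:

```js
// Sketch of the Dispatcher contract implied by the diagram above.
// `orchestrator` is assumed to be initialized elsewhere; its API is a guess.
const express = require('express');
const app = express();
app.use(express.json());

app.post('/dispatch', async (req, res) => {
  const outcome = await orchestrator.run(req.body); // assumed interface

  if (outcome.status === 'blocked') {
    // Not ready yet: 503 tells Cloud Tasks to redeliver with backoff
    return res.status(503).json({ retry: true, reason: outcome.reason });
  }
  // Success and skipped both return 200 so the task is not redelivered
  return res.status(200).json({ status: outcome.status });
});
```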
package/functions/computation-system-v2/index.js

```diff
@@ -19,10 +19,12 @@ const { ManifestBuilder } = require('./framework/core/Manifest');
 const { Computation } = require('./framework/core/Computation');
 
 // Add computations to config
+// These are loaded from computation-system-v2/computations folder
 config.computations = [
   require('./computations/UserPortfolioSummary'),
   require('./computations/PopularInvestorProfileMetrics'),
   require('./computations/PopularInvestorRiskAssessment'),
+  require('./computations/PopularInvestorRiskMetrics'),
   // Add more computations here as they're migrated
 ];
 
```
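The new comment says these modules live in the computations folder, but the list itself is still maintained by hand. The integration test below sidesteps that with filesystem discovery; if the manual list ever drifts, the same approach would work here. A sketch mirroring the test's loader:

```js
// Sketch: auto-discover computations instead of listing them by hand.
// This mirrors the fs.readdirSync loader used in test/test-full-pipeline.js.
const fs = require('fs');
const path = require('path');

const computationsDir = path.join(__dirname, 'computations');
config.computations = fs
  .readdirSync(computationsDir)
  .filter(f => f.endsWith('.js'))
  .map(f => require(path.join(computationsDir, f)));
```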
package/functions/computation-system-v2/test/test-full-pipeline.js (new file, +227 lines)

```js
/**
 * @fileoverview Full Pipeline Integration Test
 *
 * Simulates the entire flow from Dispatcher -> Orchestrator -> Worker Pool -> Storage.
 *
 * Verifies that:
 * 1. The Dispatcher BLOCKS computations with missing mandatory data (RunAnalyzer).
 * 2. Runnable computations are sent to the Worker Pool (if configured).
 * 3. Results are stored in the TEST table (not production).
 *
 * USAGE:
 *   node test/test-full-pipeline.js --date 2026-01-24
 */

process.env.NODE_ENV = 'test';
process.env.WORKER_LOCAL_MODE = 'true'; // Simulate workers locally
process.env.WORKER_POOL_ENABLED = 'true';

const fs = require('fs');
const path = require('path');
const { Orchestrator } = require('../framework/execution/Orchestrator');
const { TestConfigBuilder } = require('./run-pipeline-test');
const prodConfig = require('../config/bulltrackers.config');

// ============================================================================
// TEST RUNNER
// ============================================================================

async function runPipelineTest() {
  const args = parseArgs();
  console.log('\n╔════════════════════════════════════════════════════════════╗');
  console.log('║              FULL PIPELINE INTEGRATION TEST                ║');
  console.log('╚════════════════════════════════════════════════════════════╝');
  console.log(`📅 Target Date: ${args.date}`);
  console.log(`🧪 Test Table:  computation_results_test`);
  console.log(`👷 Worker Pool: ENABLED (Local Simulation)\n`);

  // 0. DYNAMICALLY LOAD COMPUTATIONS
  // This fixes the "Initialized with 0 computations" error
  const computationsDir = path.join(__dirname, '../computations');
  const loadedComputations = fs.readdirSync(computationsDir)
    .filter(f => f.endsWith('.js'))
    .map(f => require(path.join(computationsDir, f)));

  // Inject into config
  prodConfig.computations = loadedComputations;
  console.log(`📦 Auto-discovered ${prodConfig.computations.length} computations from /computations directory`);

  // 1. CONFIGURE TEST ENVIRONMENT
  const builder = new TestConfigBuilder(prodConfig, {
    runId: `test-${Date.now()}`,
    date: args.date,
    testBucket: 'bulltrackers-computation-staging', // <--- FIX: Use your real bucket
    batchSize: 1000,
    concurrency: 2
  });

  const testConfig = builder.build();

  // Explicitly enable worker pool in the test config
  testConfig.workerPool = {
    ...prodConfig.workerPool,
    enabled: true,
    localMode: true,
    minEntitiesForOffload: 0 // Force everything to worker pool for testing
  };

  // 2. INITIALIZE ORCHESTRATOR
  const orchestrator = new Orchestrator(testConfig, console);
  await orchestrator.initialize();

  // -------------------------------------------------------------------------
  // PHASE 1: DISPATCHER VERIFICATION (Pre-Flight Check)
  // -------------------------------------------------------------------------
  console.log('🔍 PHASE 1: DISPATCHER ANALYSIS (The Gatekeeper)');
  console.log('   Verifying that missing data BLOCKS execution...');

  const analysis = await orchestrator.analyze({ date: args.date });

  printAnalysisTable(analysis);

  // Validation: Ensure nothing "Impossible" or "Blocked" is in the runnable list
  const badRunnables = analysis.runnable.filter(r =>
    analysis.blocked.find(b => b.name === r.name) ||
    analysis.impossible.find(i => i.name === r.name)
  );

  if (badRunnables.length > 0) {
    console.error('❌ CRITICAL FAILURE: Dispatcher marked blocked tasks as runnable!');
    process.exit(1);
  }
  console.log('✅ Dispatcher logic validated. Blocked tasks will NOT run.\n');

  // -------------------------------------------------------------------------
  // PHASE 2: EXECUTION (Worker Pool & Storage)
  // -------------------------------------------------------------------------
  console.log('🚀 PHASE 2: PIPELINE EXECUTION');
  console.log('   Running only valid tasks via Worker Pool...');

  // We intercept storage to verify writes without polluting real DB (optional if using test table)
  const storageInterceptor = new TestStorageInterceptor(orchestrator.storageManager);
  orchestrator.storageManager = storageInterceptor;

  // Run!
  const result = await orchestrator.execute({
    date: args.date,
    dryRun: false // We want to test the full "write" path to the test table
  });

  // -------------------------------------------------------------------------
  // PHASE 3: VERIFICATION & REPORTING
  // -------------------------------------------------------------------------
  console.log('\n📊 PHASE 3: FINAL REPORT');

  // 1. Did Blocked Tasks Run?
  const blockedRan = result.completed.filter(c =>
    analysis.blocked.find(b => b.name === c.name)
  );

  if (blockedRan.length > 0) {
    console.error(`❌ FAILURE: The following BLOCKED tasks executed anyway: ${blockedRan.map(c => c.name).join(', ')}`);
  } else {
    console.log('✅ SUCCESS: No blocked tasks were executed.');
  }

  // 2. Did Runnable Tasks Succeed?
  const runnableNames = analysis.runnable.map(r => r.name);
  const successfulRunnables = result.completed.filter(c => runnableNames.includes(c.name));

  if (successfulRunnables.length > 0) {
    console.log(`✅ SUCCESS: ${successfulRunnables.length} runnable tasks completed successfully.`);
  } else if (runnableNames.length > 0) {
    console.warn('⚠️ WARNING: Runnable tasks existed but none completed (check errors below).');
  } else {
    console.log('ℹ️ No runnable tasks found (this is expected if data is missing).');
  }

  // 3. Storage Verification
  const writes = storageInterceptor.getSummary();
  console.log(`💾 Storage: Written ${writes.totalEntities} entity results to ${testConfig.resultStore.table}`);

  if (result.summary.errors > 0) {
    console.log('\n❌ EXECUTION ERRORS:');
    result.errors.forEach(e => console.log(`  - ${e.name}: ${e.error}`));
  }
}

// ============================================================================
// HELPERS
// ============================================================================

function printAnalysisTable(analysis) {
  console.log('\n  ┌──────────────────────────────┬──────────────┬──────────────────────────────────────────┐');
  console.log('  │ Computation                  │ Status       │ Reason                                   │');
  console.log('  ├──────────────────────────────┼──────────────┼──────────────────────────────────────────┤');

  const all = [
    ...analysis.runnable.map(r => ({ ...r, status: 'RUNNABLE' })),
    ...analysis.blocked.map(r => ({ ...r, status: 'BLOCKED' })),
    ...analysis.impossible.map(r => ({ ...r, status: 'IMPOSSIBLE' })),
    ...analysis.reRuns.map(r => ({ ...r, status: 'RERUN' })),
    ...analysis.skipped.map(r => ({ ...r, status: 'SKIPPED' }))
  ];

  all.forEach(row => {
    const name = row.name.padEnd(28).slice(0, 28);
    const status = row.status.padEnd(12);
    const reason = (row.reason || 'Ready to run').padEnd(40).slice(0, 40);

    let color = '\x1b[37m'; // White
    if (row.status === 'BLOCKED') color = '\x1b[31m'; // Red
    if (row.status === 'RUNNABLE') color = '\x1b[32m'; // Green

    console.log(`  │ ${color}${name}\x1b[0m │ ${color}${status}\x1b[0m │ ${reason} │`);
  });
  console.log('  └──────────────────────────────┴──────────────┴──────────────────────────────────────────┘\n');
}

/**
 * Simple Storage Interceptor to verify writes
 */
class TestStorageInterceptor {
  constructor(realStorage) {
    this.realStorage = realStorage;
    this.writes = [];

    // Proxy methods
    return new Proxy(this, {
      get(target, prop) {
        if (prop in target) return target[prop];
        if (typeof target.realStorage[prop] === 'function') {
          return target.realStorage[prop].bind(target.realStorage);
        }
        return target.realStorage[prop];
      }
    });
  }

  async commitResults(date, entry, results, depHashes) {
    this.writes.push({
      date,
      computation: entry.name,
      count: Object.keys(results).length
    });
    // Pass through to real storage (which is pointing to test table)
    return this.realStorage.commitResults(date, entry, results, depHashes);
  }

  getSummary() {
    return {
      totalWrites: this.writes.length,
      totalEntities: this.writes.reduce((sum, w) => sum + w.count, 0),
      details: this.writes
    };
  }
}

function parseArgs() {
  const args = process.argv.slice(2);
  let date = new Date();
  date.setDate(date.getDate() - 1); // Default to yesterday

  for (let i = 0; i < args.length; i++) {
    if (args[i] === '--date') date = new Date(args[++i]);
  }

  return { date: date.toISOString().slice(0, 10) };
}

runPipelineTest().catch(console.error);
```
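One caveat when reading the test above: `TestConfigBuilder` comes from `./run-pipeline-test`, which is not part of this diff, so its contract can only be inferred from how it is called. A hedged stand-in consistent with that usage (the field mappings are guesses):

```js
// Sketch only — ./run-pipeline-test is not in this diff, so this is an
// inferred minimal contract, not the real TestConfigBuilder.
// Known from usage: constructor(prodConfig, { runId, date, testBucket,
// batchSize, concurrency }) and a build() whose result exposes
// resultStore.table pointing at computation_results_test.
class TestConfigBuilder {
  constructor(baseConfig, opts) {
    this.baseConfig = baseConfig;
    this.opts = opts;
  }

  build() {
    return {
      ...this.baseConfig,
      runId: this.opts.runId, // guess: passed through for log correlation
      resultStore: {
        ...this.baseConfig.resultStore,
        table: 'computation_results_test' // known: the test logs this table name
      },
      staging: { bucket: this.opts.testBucket } // guess: GCS staging bucket
    };
  }
}
```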
package/functions/computation-system-v2/test/test-worker-pool.js

```diff
@@ -1,494 +1,266 @@
 /**
- * @fileoverview Worker Pool Test
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- * - Tests RemoteTaskRunner in local mode
- * - Tests full Orchestrator with worker pool
- * - Measures performance and validates results
+ * @fileoverview Worker Pool Integration Test (Real Data)
+ *
+ * Tests the Worker Pool pipeline using REAL BigQuery data.
+ *
+ * WHAT IT DOES:
+ * 1. Connects to BigQuery using your local credentials.
+ * 2. Fetches REAL data for the specified date/computation.
+ * 3. Simulates the Orchestrator's data packaging.
+ * 4. Runs the worker logic locally (via RemoteTaskRunner localMode).
+ *
+ * USAGE:
+ *   node test/test-worker-pool.js --date 2026-01-24 --computation UserPortfolioSummary
+ *
+ * FLAGS:
+ *   --date YYYY-MM-DD   (Required) Target date
+ *   --computation Name  (Default: UserPortfolioSummary)
+ *   --limit N           (Default: 5) Number of entities to test
+ *   --entities id1,id2  (Optional) Specific entities to test
  */
 
 const path = require('path');
+const { RemoteTaskRunner } = require('../framework/execution/RemoteTaskRunner');
+const { SchemaRegistry } = require('../framework/data/SchemaRegistry');
+const { QueryBuilder } = require('../framework/data/QueryBuilder');
+const { DataFetcher } = require('../framework/data/DataFetcher');
+const config = require('../config/bulltrackers.config');
 
-//
+// Force Local Mode for the Worker Pool (runs logic in-process but uses the Runner's pipeline)
 process.env.WORKER_LOCAL_MODE = 'true';
 process.env.WORKER_POOL_ENABLED = 'true';
 
-// Import modules
-const { executeLocal, loadComputation, workerHandler } = require('../handlers/worker');
-const { RemoteTaskRunner } = require('../framework/execution/RemoteTaskRunner');
-
 // ============================================================================
-//
+// HELPER: Mini Orchestrator (Data Loading)
 // ============================================================================
 
-
-
-
-function generateMockPortfolioData(userId, positionCount = 5) {
-  const positions = [];
-  const instruments = ['AAPL', 'GOOGL', 'MSFT', 'AMZN', 'TSLA', 'META', 'NVDA', 'BRK.B', 'JPM', 'V'];
-
-  for (let i = 0; i < positionCount; i++) {
-    const investedAmount = Math.random() * 10000 + 1000;
-    const profitPercent = (Math.random() * 100 - 20); // -20% to +80%
+class TestContext {
+  constructor() {
+    this.logger = console;
 
-
-
-
-
-      InvestedAmount: investedAmount,
-      Value: investedAmount * (1 + profitPercent / 100),
-      Profit: investedAmount * profitPercent / 100,
-      ProfitPercent: profitPercent,
-      OpenedDate: new Date(Date.now() - Math.random() * 365 * 24 * 60 * 60 * 1000).toISOString()
-    });
+    // Initialize Framework Data Layer
+    this.schemaRegistry = new SchemaRegistry(config.bigquery, this.logger);
+    this.queryBuilder = new QueryBuilder(config.bigquery, this.schemaRegistry, this.logger);
+    this.dataFetcher = new DataFetcher({ ...config.bigquery, tables: config.tables }, this.queryBuilder, this.logger);
   }
 
-
-
-
-
-  }
-}
-
-/**
- * Generate mock asset prices
- */
-function generateMockAssetPrices() {
-  return {
-    AAPL: { close: 175.50, change: 1.2 },
-    GOOGL: { close: 142.30, change: -0.5 },
-    MSFT: { close: 378.90, change: 0.8 },
-    AMZN: { close: 178.25, change: 2.1 },
-    TSLA: { close: 248.50, change: -1.5 },
-    META: { close: 505.75, change: 1.8 },
-    NVDA: { close: 875.20, change: 3.2 },
-    'BRK.B': { close: 410.30, change: 0.3 },
-    JPM: { close: 195.80, change: 0.9 },
-    V: { close: 275.40, change: 0.6 }
-  };
+  async initialize() {
+    console.log('🔌 Connecting to BigQuery and initializing Schema Registry...');
+    // We only warm cache for tables we'll likely need to save time
+    // In a real run, we might scan the computation's requirements
+  }
 }
 
 // ============================================================================
-//
+// MAIN RUNNER
 // ============================================================================
 
-
-
-
-
-console.log('
-console.log(
-
-
-
-
-const contextPackage = {
-  entityData: {
-    'portfolio_snapshots': generateMockPortfolioData(entityId, 5),
-    'asset_prices': generateMockAssetPrices()
-  },
-  references: {},
-  dependencies: {},
-  config: {}
-};
+async function runRealDataTest() {
+  const args = parseArgs();
+  console.log('\n╔════════════════════════════════════════════════════════════╗');
+  console.log('║          WORKER POOL INTEGRATION TEST (REAL DATA)          ║');
+  console.log('╚════════════════════════════════════════════════════════════╝');
+  console.log(`📅 Date:         ${args.date}`);
+  console.log(`🧮 Computation:  ${args.computation}`);
+  console.log(`🔢 Entity Limit: ${args.limit}`);
+  console.log(`🔧 Local Mode:   ENABLED\n`);
 
-const
-
+  const ctx = new TestContext();
+  await ctx.initialize();
+
+  // 1. Load the Computation Class
+  const computationPath = path.join(__dirname, `../computations/${args.computation}`);
+  let ComputationClass;
   try {
-
-
-
-      date,
-      contextPackage
-    });
-
-    const duration = Date.now() - startTime;
-
-    console.log(`✅ Worker executed successfully in ${duration}ms`);
-    console.log(`   Entity ID: ${result.entityId}`);
-    console.log(`   Result:`, JSON.stringify(result.result, null, 2));
-
-    // Validate result structure
-    if (result.result) {
-      console.log(`   ✓ Has userId: ${!!result.result.userId}`);
-      console.log(`   ✓ Has totalValue: ${!!result.result.totalValue}`);
-      console.log(`   ✓ Has positionCount: ${result.result.positionCount}`);
-    }
-
-    return { success: true, duration, result: result.result };
-  } catch (error) {
-    console.log(`❌ Worker execution failed: ${error.message}`);
-    console.log(error.stack);
-    return { success: false, error: error.message };
+    ComputationClass = require(computationPath);
+  } catch (e) {
+    throw new Error(`Could not load computation at ${computationPath}: ${e.message}`);
   }
-}
 
-
-
-
-
-
-
-
-  const date = new Date().toISOString().slice(0, 10);
-
-  // Create mock request/response objects
-  const mockReq = {
-    body: {
-      computationName: 'UserPortfolioSummary',
-      entityId,
-      date,
-      localContext: {
-        entityData: {
-          'portfolio_snapshots': generateMockPortfolioData(entityId, 3),
-          'asset_prices': generateMockAssetPrices()
-        },
-        references: {},
-        dependencies: {},
-        config: {}
-      }
-    }
-  };
-
-  let responseData = null;
-  let responseStatus = null;
-
-  const mockRes = {
-    status: (code) => {
-      responseStatus = code;
-      return mockRes;
-    },
-    json: (data) => {
-      responseData = data;
-      return mockRes;
-    }
+  const compConfig = ComputationClass.getConfig();
+  const manifestEntry = {
+    name: compConfig.name.toLowerCase(),
+    originalName: compConfig.name,
+    type: compConfig.type,
+    requires: compConfig.requires || {},
+    dependencies: compConfig.dependencies || [],
+    hash: 'test-hash-123'
   };
 
-
-
-
-
-
-
-    console.log(`✅ HTTP handler completed in ${duration}ms`);
-    console.log(`   Status: ${responseStatus}`);
-    console.log(`   Response status: ${responseData?.status}`);
-
-    if (responseData?.result) {
-      console.log(`   Total Value: $${responseData.result.totalValue?.toFixed(2)}`);
-      console.log(`   Positions: ${responseData.result.positionCount}`);
-    }
-
-    return { success: responseStatus === 200, duration, response: responseData };
-  } catch (error) {
-    console.log(`❌ HTTP handler failed: ${error.message}`);
-    return { success: false, error: error.message };
+  console.log(`📦 Loaded ${manifestEntry.originalName}`);
+  console.log(`   Requires: ${Object.keys(manifestEntry.requires).join(', ')}`);
+
+  // 2. Determine Driver Table (for batching)
+  const driverTable = getDriverTable(manifestEntry.requires, config);
+  if (!driverTable) {
+    throw new Error(`Could not determine driver table (entityField) for ${args.computation}. Is it a global computation?`);
   }
+  const driverEntityField = config.tables[driverTable].entityField;
+  console.log(`   Driver Table: ${driverTable} (${driverEntityField})`);
 
-
-
- */
-async function testRemoteTaskRunnerLocal() {
-  console.log('\n📦 TEST 3: RemoteTaskRunner (Local Mode)');
-  console.log('='.repeat(60));
-
-  const config = {
-    workerPool: {
-      enabled: true,
-      localMode: true,
-      concurrency: 10
-    }
-  };
-
-  const runner = new RemoteTaskRunner(config, console);
-
-  // Create mock manifest entry
-  const entry = {
-    name: 'userportfoliosummary',
-    originalName: 'UserPortfolioSummary',
-    type: 'per-entity'
-  };
-
-  const dateStr = new Date().toISOString().slice(0, 10);
-  const baseContext = {
-    references: {},
-    config: {}
-  };
-
-  // Generate test entities
-  const entityIds = ['user-batch-001', 'user-batch-002', 'user-batch-003', 'user-batch-004', 'user-batch-005'];
-  const entityDataMap = new Map();
+  // 3. Fetch Data (The "Heavy Lifting" usually done by Orchestrator)
+  console.log('\n📥 Fetching REAL data from BigQuery...');
 
-
-
-
-
+  // Split requirements
+  const { batchRequires, globalRequires } = splitRequirements(manifestEntry.requires, driverTable, config);
+
+  // Fetch Global Data (Shared)
+  let globalData = {};
+  if (Object.keys(globalRequires).length > 0) {
+    console.log(`   Fetching global requirements: ${Object.keys(globalRequires).join(', ')}...`);
+    globalData = await ctx.dataFetcher.fetchForComputation(globalRequires, args.date);
+  }
+
+  // Fetch Batch Data (Per-Entity)
+  console.log(`   Fetching batch requirements: ${Object.keys(batchRequires).join(', ')}...`);
+
+  // Create a specific entity filter if requested
+  if (args.entities.length > 0) {
+    // Inject where clause for specific entities
+    Object.values(batchRequires).forEach(req => {
+      req.where = req.where || {};
+      req.where[driverEntityField] = args.entities; // This relies on DataFetcher supporting array-based WHERE
     });
   }
-
-  const depResults = {};
 
-
+  // We fetch one large batch for the test
+  const batchStream = ctx.dataFetcher.fetchComputationBatched(batchRequires, args.date, args.limit);
 
-
-
-
-
-
-
-    entityDataMap,
-    depResults
-  );
-
-  const duration = Date.now() - startTime;
-
-  console.log(`✅ Batch completed in ${duration}ms`);
-  console.log(`   Entities processed: ${Object.keys(results).length}/${entityIds.length}`);
-  console.log(`   Errors: ${errors.length}`);
-  console.log(`   Throughput: ${(entityIds.length / (duration / 1000)).toFixed(2)} entities/sec`);
-
-  // Show sample results
-  const firstResult = Object.values(results)[0];
-  if (firstResult) {
-    console.log(`   Sample result - Total Value: $${firstResult.totalValue?.toFixed(2)}`);
-  }
-
-  if (errors.length > 0) {
-    console.log(`   First error: ${errors[0].error}`);
-  }
-
-  return { success: errors.length === 0, duration, resultCount: Object.keys(results).length };
-  } catch (error) {
-    console.log(`❌ RemoteTaskRunner failed: ${error.message}`);
-    console.log(error.stack);
-    return { success: false, error: error.message };
+  // Get the first batch
+  const { value: batch, done } = await batchStream.next();
+
+  if (done || !batch || batch.entityIds.length === 0) {
+    console.warn('⚠️ No data found for this date/computation.');
+    return;
   }
-}
 
-
-
-
-
-
-
-
-  const config = {
+  console.log(`✅ Data fetched. Processing ${batch.entityIds.length} entities: ${batch.entityIds.join(', ')}`);
+
+  // 4. Initialize RemoteTaskRunner
+  // We configured WORKER_LOCAL_MODE=true, so this will run logic in-process
+  // but crucially, it will go through the _buildContextPackage logic.
+  const runner = new RemoteTaskRunner({
     workerPool: {
       enabled: true,
-      localMode: true,
-      concurrency:
+      localMode: true, // Run in-process
+      concurrency: 5
+    }
+  }, console);
+
+  // Prepare Context
+  const baseContext = {
+    references: {}, // TODO: Load ref data if needed
+    config: {
+      project: config.bigquery.projectId,
+      tables: config.tables
     }
   };
-
-
-
-  const entry = {
-    name: 'userportfoliosummary',
-    originalName: 'UserPortfolioSummary',
-    type: 'per-entity'
-  };
-
-  const dateStr = new Date().toISOString().slice(0, 10);
-  const baseContext = { references: {}, config: {} };
-
-  // Generate 100 test entities
-  const entityCount = 100;
-  const entityIds = Array.from({ length: entityCount }, (_, i) => `user-perf-${i.toString().padStart(4, '0')}`);
+
+  // Prepare Data Maps
   const entityDataMap = new Map();
-
-
-
-
-    entityDataMap.set(entityId,
-      'portfolio_snapshots': generateMockPortfolioData(entityId, Math.floor(Math.random() * 10) + 1),
-      'asset_prices': generateMockAssetPrices()
-    });
+  const combinedData = { ...batch.data, ...globalData };
+
+  for (const entityId of batch.entityIds) {
+    const entityData = filterDataForEntity(combinedData, entityId, driverEntityField, config);
+    entityDataMap.set(entityId, entityData);
   }
+
+  // 5. Execute via Worker Pool Logic
+  console.log('\n🚀 Executing via RemoteTaskRunner (Local Simulation)...');
 
-  console.log(`   Running batch...`);
   const startTime = Date.now();
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    console.log(
-
-
-      duration,
-      throughput: parseFloat(throughput),
-      resultCount: Object.keys(results).length
-    };
-  } catch (error) {
-    console.log(`❌ Performance test failed: ${error.message}`);
-    return { success: false, error: error.message };
+  const { results, errors } = await runner.runBatch(
+    manifestEntry,
+    args.date,
+    baseContext,
+    batch.entityIds,
+    entityDataMap,
+    {} // Mock dependencies for now
+  );
+
+  const duration = Date.now() - startTime;
+
+  // 6. Report Results
+  console.log('\n╔════════════════════════════════════════════════════════════╗');
+  console.log('║                      EXECUTION REPORT                      ║');
+  console.log('╚════════════════════════════════════════════════════════════╝');
+  console.log(`⏱️ Duration: ${duration}ms`);
+  console.log(`✅ Success:  ${Object.keys(results).length}`);
+  console.log(`❌ Errors:   ${errors.length}`);
+
+  if (Object.keys(results).length > 0) {
+    console.log('\n📊 Sample Result (First Entity):');
+    const sampleId = Object.keys(results)[0];
+    console.log(`   Entity: ${sampleId}`);
+    console.log(JSON.stringify(results[sampleId], null, 2));
   }
-}
 
-
-
-
-async function testErrorHandling() {
-  console.log('\n📦 TEST 5: Error Handling');
-  console.log('='.repeat(60));
-
-  // Test 5a: Unknown computation
-  console.log('\n  5a. Unknown computation name:');
-  try {
-    const result = await executeLocal({
-      computationName: 'NonExistentComputation',
-      entityId: 'test',
-      date: '2026-01-25',
-      contextPackage: {}
-    });
-    console.log(`  ❌ Should have thrown error, got: ${JSON.stringify(result)}`);
-  } catch (error) {
-    console.log(`  ✅ Correctly threw error: ${error.message}`);
+  if (errors.length > 0) {
+    console.log('\n❌ Errors:');
+    errors.forEach(e => console.log(`  [${e.entityId}] ${e.error}`));
   }
-
-
-
-
-
-
-
-
-
-
-        references: {},
-        dependencies: {}
-      }
-    });
-    console.log(`  ✅ Handled gracefully: ${result.result === null ? 'null result' : JSON.stringify(result.result)}`);
-  } catch (error) {
-    console.log(`  ⚠️ Error (may be expected): ${error.message}`);
+}
+
+// ============================================================================
+// UTILS (Duplicated from Orchestrator logic)
+// ============================================================================
+
+function getDriverTable(requires, config) {
+  for (const name of Object.keys(requires)) {
+    const conf = config.tables[name];
+    if (conf && conf.entityField) return name;
   }
-
-  return { success: true };
+  return null;
 }
 
-
-
-
-
-
-
-
-
-
-
-
-  ];
-
-  const results = [];
-
-  for (const name of computationNames) {
-    try {
-      const cls = loadComputation(name);
-      if (cls) {
-        console.log(`  ✅ ${name}: Loaded successfully`);
-        results.push({ name, success: true });
-      } else {
-        console.log(`  ❌ ${name}: Failed to load (returned null)`);
-        results.push({ name, success: false });
-      }
-    } catch (error) {
-      console.log(`  ❌ ${name}: ${error.message}`);
-      results.push({ name, success: false, error: error.message });
+function splitRequirements(requires, driverTable, config) {
+  const batchRequires = {};
+  const globalRequires = {};
+  const driverConfig = config.tables[driverTable];
+  const driverEntityField = driverConfig ? driverConfig.entityField : null;
+
+  for (const [name, spec] of Object.entries(requires)) {
+    const conf = config.tables[name];
+    if (conf && conf.entityField === driverEntityField) {
+      batchRequires[name] = spec;
+    } else {
+      globalRequires[name] = spec;
     }
   }
-
-  const successCount = results.filter(r => r.success).length;
-  console.log(`\n  Summary: ${successCount}/${computationNames.length} loaded successfully`);
-
-  return { success: successCount === computationNames.length, results };
+  return { batchRequires, globalRequires };
 }
 
-
-
+function filterDataForEntity(data, id, driverEntityField, config) {
+  const out = {};
+  Object.entries(data).forEach(([tbl, d]) => {
+    const conf = config.tables[tbl] || {};
+    if (conf.entityField === driverEntityField && d && !Array.isArray(d)) {
+      out[tbl] = d[id] || null;
+    } else {
+      out[tbl] = d;
+    }
+  });
+  return out;
+}
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  results.performance = await testPerformance();
-  results.errorHandling = await testErrorHandling();
-  results.loadComputations = await testLoadComputations();
-
-  // Summary
-  console.log('\n');
-  console.log('╔════════════════════════════════════════════════════════════╗');
-  console.log('║                        TEST SUMMARY                        ║');
-  console.log('╚════════════════════════════════════════════════════════════╝');
-
-  let passCount = 0;
-  let failCount = 0;
-
-  for (const [name, result] of Object.entries(results)) {
-    const icon = result.success ? '✅' : '❌';
-    const status = result.success ? 'PASS' : 'FAIL';
-    console.log(`${icon} ${name}: ${status}`);
-    if (result.success) passCount++;
-    else failCount++;
-  }
-
-  console.log(`\n📊 Results: ${passCount} passed, ${failCount} failed`);
-
-  if (results.performance && results.performance.throughput) {
-    console.log(`🚀 Performance: ${results.performance.throughput} entities/sec`);
+function parseArgs() {
+  const args = process.argv.slice(2);
+  const options = {
+    date: new Date().toISOString().slice(0, 10),
+    computation: 'UserPortfolioSummary',
+    limit: 5,
+    entities: []
+  };
+
+  for (let i = 0; i < args.length; i++) {
+    const arg = args[i];
+    if (arg === '--date') options.date = args[++i];
+    if (arg === '--computation') options.computation = args[++i];
+    if (arg === '--limit') options.limit = parseInt(args[++i], 10);
+    if (arg === '--entities') options.entities = args[++i].split(',');
   }
-
-  // Exit with appropriate code
-  process.exit(failCount > 0 ? 1 : 0);
+  return options;
 }
 
-// Run
-
-console.error('Fatal
+// Run
+runRealDataTest().catch(e => {
+  console.error('\n💥 Fatal Error:', e);
   process.exit(1);
-});
+});
```
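Finally, the rewritten test flags its one soft dependency inline: `req.where[driverEntityField] = args.entities` only works if DataFetcher turns an array value into a set-membership filter. The sketch below shows what that support would have to emit on the BigQuery side, with placeholder names; this is not DataFetcher code:

```js
// Sketch only — the shape of "array-based WHERE" support the test assumes.
// Field names are placeholders; DataFetcher's real internals aren't in this diff.
function whereClause(field, value) {
  if (Array.isArray(value)) {
    // Arrays become set membership via a BigQuery array query parameter
    return { sql: `${field} IN UNNEST(@${field})`, params: { [field]: value } };
  }
  return { sql: `${field} = @${field}`, params: { [field]: value } };
}

// whereClause('userId', ['a', 'b'])
// => { sql: 'userId IN UNNEST(@userId)', params: { userId: ['a', 'b'] } }
```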