bulltrackers-module 1.0.282 → 1.0.284

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,7 +1,8 @@
1
1
  /**
2
2
  * FILENAME: computation-system/helpers/computation_dispatcher.js
3
- * PURPOSE: "Smart Dispatcher" - Analyzes state and only dispatches valid, runnable tasks.
4
- * UPDATED: Implements Zombie Task Recovery & Dependency Result Hash Passing.
3
+ * PURPOSE: "Smart Dispatcher" - Analyzes state and dispatches tasks.
4
+ * UPDATED: Removed "Zombie" logic. Now forcefully dispatches any task
5
+ * that is not explicitly COMPLETED, ensuring reliability for one-shot execution.
5
6
  */
6
7
 
7
8
  const { getExpectedDateStrings, normalizeName, DEFINITIVE_EARLIEST_DATES } = require('../utils/utils.js');
@@ -116,7 +117,6 @@ async function dispatchComputationPass(config, dependencies, computationManifest
116
117
  hash: item.hash || item.newHash,
117
118
  previousCategory: item.previousCategory || null,
118
119
  triggerReason: item.reason || "Unknown",
119
- // [NEW] Pass Content-Based hashes provided by analyzeDateExecution
120
120
  dependencyResultHashes: item.dependencyResultHashes || {},
121
121
  timestamp: Date.now()
122
122
  });
@@ -142,21 +142,25 @@ async function dispatchComputationPass(config, dependencies, computationManifest
142
142
  await db.runTransaction(async (t) => {
143
143
  const doc = await t.get(ledgerRef);
144
144
 
145
- // [NEW] Zombie Task Recovery Check
145
+ // [UPDATED] Robust One-Shot Dispatch Logic
146
+ // We REMOVED the "Zombie Timeout" check.
147
+ // If the Dispatcher is running, we assume the user intends to ensure these tasks are dispatched.
148
+
146
149
  if (doc.exists) {
147
150
  const data = doc.data();
148
- const now = Date.now();
149
- const isPending = data.status === 'PENDING';
150
- // A task is a zombie if it is PENDING and the lease has expired (or lease is missing but it's been > 1h)
151
- const isLeaseExpired = data.leaseExpiresAt && data.leaseExpiresAt < now;
152
- // Fallback: If no lease exists, assume 1 hour timeout for legacy zombie detection
153
- const isLegacyZombie = !data.leaseExpiresAt && data.createdAt && (now - data.createdAt.toMillis() > 3600000);
154
-
155
- if (isPending && !isLeaseExpired && !isLegacyZombie) {
156
- return false; // Valid active pending task, do not double dispatch
151
+
152
+ // 1. If it's already COMPLETED, do not re-run (Strict idempotency).
153
+ if (data.status === 'COMPLETED') {
154
+ return false;
157
155
  }
156
+
157
+ // 2. If it is PENDING or IN_PROGRESS:
158
+ // Since the Dispatcher runs ONCE per day, seeing PENDING here means
159
+ // the *previous* run failed to complete, or the worker died.
160
+ // We overwrite it to force a restart.
158
161
  }
159
162
 
163
+ // Create/Overwrite entry with PENDING to start the cycle
160
164
  t.set(ledgerRef, {
161
165
  status: 'PENDING',
162
166
  dispatchId: task.dispatchId,
@@ -165,8 +169,9 @@ async function dispatchComputationPass(config, dependencies, computationManifest
165
169
  createdAt: new Date(),
166
170
  dispatcherHash: currentManifestHash,
167
171
  triggerReason: task.triggerReason,
168
- retries: 0
172
+ retries: 0 // Reset retries for the new attempt
169
173
  }, { merge: true });
174
+
170
175
  return true;
171
176
  });
172
177
 
@@ -191,7 +196,7 @@ async function dispatchComputationPass(config, dependencies, computationManifest
191
196
 
192
197
  return { dispatched: finalDispatched.length };
193
198
  } else {
194
- logger.log('INFO', `[Dispatcher] All tasks were already PENDING (Double Dispatch avoided).`);
199
+ logger.log('INFO', `[Dispatcher] All tasks were already COMPLETED (Double Dispatch avoided).`);
195
200
  return { dispatched: 0 };
196
201
  }
197
202
 
@@ -1,7 +1,8 @@
1
1
  /**
2
2
  * FILENAME: computation-system/helpers/computation_worker.js
3
3
  * PURPOSE: Consumes computation tasks from Pub/Sub.
4
- * UPDATED: Implements Lease Claiming and passes Dependency Hashes.
4
+ * UPDATED: Fixed "Silent Failure" bug where tasks got stuck in PENDING.
5
+ * Increased MAX_RETRIES and ensured Ledger is updated on poison messages.
5
6
  */
6
7
 
7
8
  const { executeDispatchTask } = require('../WorkflowOrchestrator.js');
@@ -13,7 +14,10 @@ let calculationPackage;
13
14
  try { calculationPackage = require('aiden-shared-calculations-unified');
14
15
  } catch (e) {console.error("FATAL: Could not load 'aiden-shared-calculations-unified'."); throw e; }
15
16
  const calculations = calculationPackage.calculations;
16
- const MAX_RETRIES = 0;
17
+
18
+ // [FIX] Increased from 0 to 3.
19
+ // 0 caused "retryCount >= MAX_RETRIES" to trigger immediately on the first run.
20
+ const MAX_RETRIES = 3;
17
21
 
18
22
  async function handleComputationTask(message, config, dependencies) {
19
23
  const systemLogger = new StructuredLogger({ minLevel: config.minLevel || 'INFO', enableStructured: true, ...config });
@@ -35,24 +39,49 @@ async function handleComputationTask(message, config, dependencies) {
35
39
 
36
40
  if (!date || !pass || !computation) { logger.log('ERROR', `[Worker] Invalid payload.`, data); return; }
37
41
 
38
- logger.log('INFO', `[Worker] 📥 Received Task: ${computation} (${date})`, {
42
+ // [FIX] Ensure retryCount defaults to 1 (PubSub usually sends 1 for the first attempt)
43
+ const retryCount = message.deliveryAttempt || 1;
44
+
45
+ // [FIX] Changed condition to '>' so attempts 1, 2, and 3 are allowed to run.
46
+ if (retryCount > MAX_RETRIES) {
47
+ logger.log('ERROR', `[Worker] ☠️ Task POISONED. Moved to DLQ: ${computation}`);
48
+ try {
49
+ await db.collection('computation_dead_letter_queue').add({
50
+ originalData: data,
51
+ dispatchId: dispatchId,
52
+ error: { message: 'Max Retries Exceeded', stack: 'PubSub delivery limit reached' },
53
+ finalAttemptAt: new Date(),
54
+ failureReason: 'MAX_RETRIES_EXCEEDED'
55
+ });
56
+
57
+ // [FIX] CRITICAL: Update Ledger to FAILED.
58
+ // Previously, this returned without updating, leaving the Ledger stuck in 'PENDING'.
59
+ // Now we explicitly mark it FAILED so the pipeline knows it's dead.
60
+ await db.collection(`computation_audit_ledger/${date}/passes/${pass}/tasks`).doc(computation).set({
61
+ status: 'FAILED',
62
+ error: 'Max Retries Exceeded (Poison Message)',
63
+ failedAt: new Date()
64
+ }, { merge: true });
65
+
66
+ return;
67
+ } catch (dlqErr) { logger.log('FATAL', `[Worker] Failed to write to DLQ`, dlqErr); }
68
+ }
69
+
70
+ logger.log('INFO', `[Worker] 📥 Received Task: ${computation} (${date}) [Attempt ${retryCount}/${MAX_RETRIES}]`, {
39
71
  dispatchId: dispatchId || 'legacy',
40
72
  reason: triggerReason
41
73
  });
42
74
 
43
- // [NEW] LEASE CLAIMING
44
- // Mark task as IN_PROGRESS and set a lease timeout (e.g., 20 minutes) to prevent Zombies
75
+ // Mark task as IN_PROGRESS (Visual only, dispatcher does not use this for logic anymore)
45
76
  try {
46
- const leaseTimeMs = (config.workerLeaseMinutes || 20) * 60 * 1000;
47
77
  await db.collection(`computation_audit_ledger/${date}/passes/${pass}/tasks`).doc(computation).set({
48
78
  status: 'IN_PROGRESS',
49
79
  workerId: process.env.K_REVISION || 'unknown',
50
80
  startedAt: new Date(),
51
- leaseExpiresAt: Date.now() + leaseTimeMs,
52
81
  dispatchId: dispatchId
53
82
  }, { merge: true });
54
83
  } catch (leaseErr) {
55
- logger.log('WARN', `[Worker] Failed to claim lease for ${computation}. Continuing anyway...`, leaseErr);
84
+ logger.log('WARN', `[Worker] Failed to update status to IN_PROGRESS for ${computation}. Continuing...`, leaseErr);
56
85
  }
57
86
 
58
87
  let computationManifest;
@@ -73,7 +102,7 @@ async function handleComputationTask(message, config, dependencies) {
73
102
  runDependencies,
74
103
  computationManifest,
75
104
  previousCategory,
76
- dependencyResultHashes // [NEW] Pass hashes to executor
105
+ dependencyResultHashes
77
106
  );
78
107
  const duration = Date.now() - startTime;
79
108
 
@@ -121,28 +150,30 @@ async function handleComputationTask(message, config, dependencies) {
121
150
  finalAttemptAt: new Date(),
122
151
  failureReason: 'PERMANENT_DETERMINISTIC_ERROR'
123
152
  });
153
+
154
+ // [FIX] Update Ledger to FAILED immediately for deterministic errors
155
+ await db.collection(`computation_audit_ledger/${date}/passes/${pass}/tasks`).doc(computation).set({
156
+ status: 'FAILED',
157
+ error: err.message || 'Permanent Deterministic Error',
158
+ failedAt: new Date()
159
+ }, { merge: true });
160
+
124
161
  await recordRunAttempt(db, { date, computation, pass }, 'FAILURE', { message: err.message, stage: err.stage || 'PERMANENT_FAIL' }, { durationMs: 0 }, triggerReason);
125
162
  return;
126
163
  } catch (dlqErr) { logger.log('FATAL', `[Worker] Failed to write to DLQ`, dlqErr); }
127
164
  }
128
165
 
129
- const retryCount = message.deliveryAttempt || 0;
166
+ // Standard Retryable Error (Crash)
130
167
  if (retryCount >= MAX_RETRIES) {
131
- logger.log('ERROR', `[Worker] ☠️ Task POISONED. Moved to DLQ: ${computation}`);
132
- try {
133
- await db.collection('computation_dead_letter_queue').add({
134
- originalData: data,
135
- dispatchId: dispatchId,
136
- error: { message: err.message, stack: err.stack },
137
- finalAttemptAt: new Date(),
138
- failureReason: 'MAX_RETRIES_EXCEEDED'
139
- });
140
- return;
141
- } catch (dlqErr) { logger.log('FATAL', `[Worker] Failed to write to DLQ`, dlqErr); }
168
+ // We throw here, PubSub will retry, and the "Poison Logic" at the top
169
+ // will catch it on the NEXT attempt to finalize the failure.
170
+ throw err;
142
171
  }
143
172
 
144
173
  logger.log('ERROR', `[Worker] ❌ Crash: ${computation}: ${err.message}`);
174
+
145
175
  await recordRunAttempt(db, { date, computation, pass }, 'CRASH', { message: err.message, stack: err.stack, stage: 'SYSTEM_CRASH' }, { durationMs: 0 }, triggerReason);
176
+ // Throwing triggers Pub/Sub retry
146
177
  throw err;
147
178
  }
148
179
  }
@@ -0,0 +1,81 @@
1
/**
 * @fileoverview Enforces the contracts discovered by the offline tool.
 * Designed to be permissive with volatility ("Anomalies") but strict with logic ("Violations").
 */
class ContractValidator {

  /**
   * Validates a production output against a loaded contract.
   *
   * @param {Object} result - The production output (single item or batch).
   *   NOTE(review): batches are assumed to be a map of User -> Result
   *   (per StandardExecutor); a bare single item would have its property
   *   values iterated instead — confirm callers always pass the map shape.
   * @param {Object} contract - The loaded contract JSON.
   * @returns {Object} { valid: boolean, reason?: string } - first failing item wins.
   */
  static validate(result, contract) {
    // Nothing to enforce when either side is absent.
    if (!result || !contract) return { valid: true };

    // Handle Batches (StandardExecutor produces map of User -> Result)
    const items = Object.values(result);
    if (items.length === 0) return { valid: true };

    // For "Cohort" logic we check every item, because one bad apple
    // spoils the average.
    for (const item of items) {
      const check = this._validateItem(item, contract);
      if (!check.valid) return check;
    }

    return { valid: true };
  }

  /**
   * Validates a single item against the contract's three gates:
   * structure (requiredKeys), hard numeric bounds, and statistical sanity.
   *
   * @param {Object} item - One result object from the batch.
   * @param {Object} contract - Contract with optional requiredKeys,
   *   numericBounds ({min, max} per key) and distributions ({mean, stdDev} per key).
   * @returns {Object} { valid: boolean, reason?: string }
   */
  static _validateItem(item, contract) {
    // 1. Structure Check
    if (contract.requiredKeys) {
      for (const key of contract.requiredKeys) {
        if (item[key] === undefined) {
          return { valid: false, reason: `Schema Violation: Missing key '${key}'` };
        }
      }
    }

    // 2. Numeric Physics Check (Hard Bounds)
    if (contract.numericBounds) {
      for (const [key, bounds] of Object.entries(contract.numericBounds)) {
        const val = item[key];
        // Non-numeric values are the schema gate's problem, not physics'.
        if (typeof val !== 'number') continue;

        if (val < bounds.min) {
          return { valid: false, reason: `Physics Violation: ${key} (${val}) is below hard limit ${bounds.min}` };
        }
        if (val > bounds.max) {
          return { valid: false, reason: `Physics Violation: ${key} (${val}) is above hard limit ${bounds.max}` };
        }
      }
    }

    // 3. Statistical Sanity Check (Soft Bounds)
    // We generally DO NOT BLOCK on this for financial data, unless it's egregious.
    // We block if it's "Mathematically Impossible" based on the distribution.
    if (contract.distributions) {
      for (const [key, dist] of Object.entries(contract.distributions)) {
        const val = item[key];
        if (typeof val !== 'number') continue;

        // [FIX] Guard degenerate distributions. Previously a missing stdDev
        // produced NaN sigmas (silently passing) while stdDev === 0 produced
        // Infinity sigmas (always blocking). Neither gives a meaningful sigma
        // test, so skip both cases consistently.
        if (typeof dist.stdDev !== 'number' || !(dist.stdDev > 0)) continue;

        const diff = Math.abs(val - dist.mean);
        const sigmas = diff / dist.stdDev;

        // 15 Sigma is our "Ridiculousness Threshold".
        // Even crypto doesn't move 15 standard deviations in one calculation step
        // unless the data is corrupt (e.g. integer overflow, or bad scraping).
        if (sigmas > 15 && diff > 1.0) { // Ensure diff is material
          return {
            valid: false,
            reason: `Statistical Impossibility: ${key} is ${sigmas.toFixed(1)} sigmas from mean. Value: ${val}, Mean: ${dist.mean}`
          };
        }
      }
    }

    return { valid: true };
  }
}
80
+
81
+ module.exports = ContractValidator;
@@ -4,6 +4,7 @@
4
4
  * UPDATED: Implements Content-Based Hashing (ResultHash) for dependency short-circuiting.
5
5
  * UPDATED: Auto-enforces Weekend Mode validation.
6
6
  * UPDATED: Implements "Initial Write" logic to wipe stale data/shards on a fresh run.
7
+ * UPDATED: Implements "Contract Validation" (Semantic Gates) to block logical violations.
7
8
  * OPTIMIZED: Fetches pre-calculated 'simHash' from Registry (removes expensive simulation step).
8
9
  */
9
10
  const { commitBatchInChunks, generateDataHash } = require('../utils/utils');
@@ -11,6 +12,7 @@ const { updateComputationStatus } = require('./StatusRepository');
11
12
  const { batchStoreSchemas } = require('../utils/schema_capture');
12
13
  const { generateProcessId, PROCESS_TYPES } = require('../logger/logger');
13
14
  const { HeuristicValidator } = require('./ResultsValidator');
15
+ const ContractValidator = require('./ContractValidator'); // [NEW]
14
16
  const validationOverrides = require('../config/validation_overrides');
15
17
  const pLimit = require('p-limit');
16
18
  const zlib = require('zlib');
@@ -20,6 +22,7 @@ const NON_RETRYABLE_ERRORS = [
20
22
  ];
21
23
 
22
24
  const SIMHASH_REGISTRY_COLLECTION = 'system_simhash_registry';
25
+ const CONTRACTS_COLLECTION = 'system_contracts'; // [NEW]
23
26
 
24
27
  /**
25
28
  * Commits results to Firestore.
@@ -40,6 +43,10 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
40
43
 
41
44
  const fanOutLimit = pLimit(10);
42
45
 
46
+ // [NEW] Bulk fetch contracts for all calcs in this batch to minimize latency
47
+ // This prevents N+1 reads during the loop
48
+ const contractMap = await fetchContracts(db, Object.keys(stateObj));
49
+
43
50
  for (const name in stateObj) {
44
51
  const calc = stateObj[name];
45
52
  const execStats = calc._executionStats || { processedUsers: 0, skippedUsers: 0 };
@@ -68,7 +75,23 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
68
75
  };
69
76
  }
70
77
 
71
- // Validation
78
+ // 1. SEMANTIC GATE (CONTRACT VALIDATION) [NEW]
79
+ // We run this BEFORE Heuristics because it catches "Logic Bugs" vs "Data Noise"
80
+ const contract = contractMap[name];
81
+ if (contract) {
82
+ const contractCheck = ContractValidator.validate(result, contract);
83
+ if (!contractCheck.valid) {
84
+ // STOP THE CASCADE: Fail this specific calculation
85
+ runMetrics.validation.isValid = false;
86
+ runMetrics.validation.anomalies.push(contractCheck.reason);
87
+
88
+ const semanticError = new Error(contractCheck.reason);
89
+ semanticError.stage = 'SEMANTIC_GATE';
90
+ throw semanticError;
91
+ }
92
+ }
93
+
94
+ // 2. HEURISTIC VALIDATION (Data Integrity)
72
95
  if (result && Object.keys(result).length > 0) {
73
96
  const healthCheck = HeuristicValidator.analyze(calc.manifest.name, result, dStr, effectiveOverrides);
74
97
  if (!healthCheck.valid) {
@@ -87,15 +110,11 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
87
110
  let simHash = null;
88
111
  if (calc.manifest.hash && flushMode !== 'INTERMEDIATE') {
89
112
  try {
90
- // Fast O(1) lookup using Code Hash
91
- // We simply check if the BuildReporter has already stamped this code version
92
113
  const regDoc = await db.collection(SIMHASH_REGISTRY_COLLECTION).doc(calc.manifest.hash).get();
93
114
  if (regDoc.exists) {
94
115
  simHash = regDoc.data().simHash;
95
116
  } else {
96
- // Fallback: This happens if BuildReporter didn't run or is out of sync.
97
- // We do NOT run SimRunner here to protect production performance.
98
- logger.log('WARN', `[ResultCommitter] SimHash not found in registry for ${name} (Hash: ${calc.manifest.hash}). Is BuildReporter skipped?`);
117
+ logger.log('WARN', `[ResultCommitter] SimHash not found in registry for ${name}.`);
99
118
  }
100
119
  } catch (regErr) {
101
120
  logger.log('WARN', `[ResultCommitter] Failed to read SimHash registry: ${regErr.message}`);
@@ -110,7 +129,7 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
110
129
  if (calc.manifest.hash) {
111
130
  successUpdates[name] = {
112
131
  hash: calc.manifest.hash,
113
- simHash: simHash, // [NEW] Populated from Registry
132
+ simHash: simHash,
114
133
  resultHash: resultHash,
115
134
  dependencyResultHashes: calc.manifest.dependencyResultHashes || {},
116
135
  category: calc.manifest.category,
@@ -145,7 +164,7 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
145
164
  if (calc.manifest.hash) {
146
165
  successUpdates[name] = {
147
166
  hash: calc.manifest.hash,
148
- simHash: simHash, // [NEW] Populated from Registry
167
+ simHash: simHash,
149
168
  resultHash: resultHash,
150
169
  dependencyResultHashes: calc.manifest.dependencyResultHashes || {},
151
170
  category: calc.manifest.category,
@@ -173,7 +192,7 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
173
192
  if (calc.manifest.hash) {
174
193
  successUpdates[name] = {
175
194
  hash: calc.manifest.hash,
176
- simHash: simHash, // [NEW] Populated from Registry
195
+ simHash: simHash,
177
196
  resultHash: resultHash,
178
197
  dependencyResultHashes: calc.manifest.dependencyResultHashes || {},
179
198
  category: calc.manifest.category,
@@ -209,6 +228,30 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
209
228
  return { successUpdates, failureReport, shardIndexes: nextShardIndexes };
210
229
  }
211
230
 
231
/**
 * [NEW] Helper to fetch contracts for a list of calculations.
 *
 * Performs a single batched `getAll` read instead of one read per name
 * (avoids N+1 lookups inside the commit loop). A failed batch read is
 * logged and yields an empty map, so a contracts outage never blocks
 * the commit path.
 *
 * @param {Object} db - Firestore instance.
 * @param {string[]} calcNames - Calculation names (contract document IDs).
 * @returns {Promise<Object>} Map of calcName -> contract data (hits only).
 */
async function fetchContracts(db, calcNames) {
  const map = {};
  if (!calcNames || calcNames.length === 0) return map;

  // In a high-throughput system we might cache these in memory (LRU);
  // for now a single batched Firestore read is efficient enough.
  const refs = calcNames.map((name) => db.collection(CONTRACTS_COLLECTION).doc(name));

  try {
    const snapshots = await db.getAll(...refs);
    for (const snapshot of snapshots) {
      if (snapshot.exists) {
        map[snapshot.id] = snapshot.data();
      }
    }
  } catch (e) {
    console.warn(`[ResultCommitter] Failed to fetch contracts batch: ${e.message}`);
  }
  return map;
}
254
+
212
255
  async function writeSingleResult(result, docRef, name, dateContext, logger, config, deps, startShardIndex = 0, flushMode = 'STANDARD', isInitialWrite = false) {
213
256
 
214
257
  // Transition & Cleanup Logic
@@ -0,0 +1,128 @@
1
/**
 * @fileoverview Discovery Script: UpdateContracts.js
 * Runs offline simulations to "learn" the behavioral contracts of all calculations.
 * Saves these contracts to Firestore for the Runtime Enforcer (ResultCommitter) to use.
 *
 * USAGE:
 *   node computation-system/scripts/UpdateContracts.js [--calc=CalcName]
 */

const path = require('path'); // NOTE(review): currently unused in this script — confirm before removing.
const admin = require('firebase-admin');

// Initialize Firebase (Standard Env Check).
// Both branches call initializeApp(); the condition only controls the warning,
// since default init may still succeed in some local environments.
if (!admin.apps.length) {
  if (process.env.GOOGLE_APPLICATION_CREDENTIALS) {
    admin.initializeApp();
  } else {
    // Fallback for local dev if key path isn't set in env
    console.warn("⚠️ No GOOGLE_APPLICATION_CREDENTIALS. Attempting default init...");
    admin.initializeApp();
  }
}

const db = admin.firestore();
const { StructuredLogger } = require('../logger/logger');
const { getManifest } = require('../topology/ManifestLoader');
const ContractDiscoverer = require('../tools/ContractDiscoverer');

// Load Calculations Package (hard requirement: without it there is nothing to analyze)
let calculationPackage;
try {
  // Adjust path if necessary for your local monorepo structure
  calculationPackage = require('aiden-shared-calculations-unified');
} catch (e) {
  console.error("FATAL: Could not load 'aiden-shared-calculations-unified'. Ensure you are in the correct directory or npm link is active.");
  process.exit(1);
}

// Firestore collection read by the runtime enforcer (ResultCommitter).
const CONTRACTS_COLLECTION = 'system_contracts';

/**
 * Entry point: discovers a behavioral contract for each calculation in the
 * manifest (or a single one selected via --calc=Name) and writes each
 * contract to the CONTRACTS_COLLECTION in Firestore.
 */
async function main() {
  const logger = new StructuredLogger({ enableConsole: true });

  // 1. Setup Dependencies
  // The ManifestLoader and Discoverer need a mock dependency object
  const mockDeps = {
    db,
    logger,
    // Mock specific utils if needed by your calculations during instantiation
    calculationUtils: {
      loadInstrumentMappings: async () => ({ instrumentToTicker: {}, tickerToInstrument: {} })
    }
  };

  console.log("🚀 Starting Contract Discovery...");

  // 2. Load Manifest
  const calculations = calculationPackage.calculations;
  const manifest = getManifest([], calculations, mockDeps);
  const manifestMap = new Map(manifest.map(c => [c.name, c]));

  console.log(`ℹ️ Loaded manifest with ${manifest.length} calculations.`);

  // 3. Filter Target (Optional CLI Arg) — case-insensitive name match.
  const targetArg = process.argv.find(a => a.startsWith('--calc='));
  const targetName = targetArg ? targetArg.split('=')[1] : null;

  let calcsToProcess = manifest;
  if (targetName) {
    calcsToProcess = manifest.filter(c => c.name.toLowerCase() === targetName.toLowerCase());
    if (calcsToProcess.length === 0) {
      console.error(`❌ Calculation '${targetName}' not found.`);
      process.exit(1);
    }
  }

  // 4. Run Discovery Loop
  let successCount = 0;
  let skipCount = 0;

  for (const calc of calcsToProcess) {
    // Skip computations that don't produce data (like aggregators without schema)
    if (!calc.class.getSchema && !calc.dependencies) {
      console.log(`⏭️ Skipping ${calc.name} (No schema/outputs to analyze).`);
      skipCount++;
      continue;
    }

    try {
      // A. Discover Contract via Simulation
      // We run 50 iterations to get a statistically significant sample
      const contract = await ContractDiscoverer.generateContract(calc, manifestMap, 50);

      if (contract) {
        // B. Enrich with Metadata
        // FIX: Create a NEW object to satisfy Type Checking (avoid mutating the inferred shape)
        const finalContract = {
          ...contract,
          lastUpdated: new Date(),
          generatedBy: 'UpdateContracts.js',
          version: '1.0'
        };

        // C. Save to Firestore (doc ID = calculation name, matching the enforcer's lookup)
        await db.collection(CONTRACTS_COLLECTION).doc(calc.name).set(finalContract);
        console.log(`✅ [SAVED] Contract for ${calc.name}`);
        successCount++;
      } else {
        console.warn(`⚠️ [EMPTY] No contract generated for ${calc.name} (Insufficient data/samples).`);
        skipCount++;
      }

    } catch (err) {
      // Per-calculation failures are logged and do not abort the whole run.
      console.error(`❌ [ERROR] Failed to generate contract for ${calc.name}:`, err.message);
    }
  }

  console.log("\n============================================");
  console.log(`🎉 Discovery Complete.`);
  console.log(`   Updated: ${successCount}`);
  console.log(`   Skipped: ${skipCount}`);
  console.log("============================================");
}

main().catch(err => {
  console.error("FATAL SCRIPT ERROR:", err);
  process.exit(1);
});
@@ -112,11 +112,11 @@ class Fabricator {
112
112
  id: String(userId),
113
113
  type: type || 'all',
114
114
  portfolio: {
115
- today: isSpeculator ? this._genSpecPortfolio(userId) : this._genNormalPortfolio(userId),
115
+ today: isSpeculator ? this._genSpecPortfolio(userId) : this._genNormalPortfolio(userId),
116
116
  yesterday: isSpeculator ? this._genSpecPortfolio(userId) : this._genNormalPortfolio(userId)
117
117
  },
118
118
  history: {
119
- today: { PublicHistoryPositions: this._genHistoryTrades(userId) },
119
+ today: { PublicHistoryPositions: this._genHistoryTrades(userId) },
120
120
  yesterday: { PublicHistoryPositions: this._genHistoryTrades(userId) }
121
121
  }
122
122
  };
@@ -3,6 +3,7 @@
3
3
  * Generates a "Pre-Flight" report of what the computation system WILL do.
4
4
  * UPGRADED: Implements Behavioral Hashing (SimHash) to detect Cosmetic vs Logic changes.
5
5
  * OPTIMIZED: Caches SimHashes and actively updates status for Stable items to prevent re-runs.
6
+ * OPTIMIZED (V2): Implements System Fingerprinting to skip 90-day scan if manifest is identical.
6
7
  */
7
8
 
8
9
  const { analyzeDateExecution } = require('../WorkflowOrchestrator');
@@ -12,12 +13,23 @@ const { checkRootDataAvailability } = req
12
13
  const SimRunner = require('../simulation/SimRunner');
13
14
  const pLimit = require('p-limit');
14
15
  const path = require('path');
16
+ const crypto = require('crypto');
15
17
  const packageJson = require(path.join(__dirname, '..', '..', '..', 'package.json'));
16
18
  const packageVersion = packageJson.version;
17
19
 
18
20
  // Persistent Registry for SimHashes (so Workers don't have to recalc)
19
21
  const SIMHASH_REGISTRY_COLLECTION = 'system_simhash_registry';
20
22
 
23
/**
 * Helper: Generates a unique signature for the entire computation system state.
 * If ANY calculation logic or dependency changes, this hash changes.
 *
 * @param {Array<{hash: string}>} manifest - Full computation manifest.
 * @returns {string} Hex-encoded SHA-256 fingerprint over all code hashes.
 */
function getSystemFingerprint(manifest) {
  // Sorting makes the signature independent of manifest ordering (determinism).
  const codeHashes = manifest.map((entry) => entry.hash);
  codeHashes.sort();
  const canonical = codeHashes.join('|');
  return crypto.createHash('sha256').update(canonical).digest('hex');
}
32
+
21
33
  /**
22
34
  * Helper: Determines if a calculation should be excluded from the report.
23
35
  */
@@ -148,8 +160,40 @@ async function ensureBuildReport(config, dependencies, manifest) {
148
160
 
149
161
  if (!shouldRun) { logger.log('INFO', `[BuildReporter] 🔒 Report for v${packageVersion} locked. Skipping.`); return; }
150
162
 
151
- logger.log('INFO', `[BuildReporter] 🚀 Running Pre-flight Report for v${packageVersion}...`);
152
- await generateBuildReport(config, dependencies, manifest, 90, buildId);
163
+ // [NEW] 1. Calculate Current System Fingerprint
164
+ const currentSystemHash = getSystemFingerprint(manifest);
165
+
166
+ // [NEW] 2. Fetch Last Build's Fingerprint
167
+ const latestBuildDoc = await db.collection('computation_build_records').doc('latest').get();
168
+
169
+ if (latestBuildDoc.exists) {
170
+ const latestData = latestBuildDoc.data();
171
+
172
+ // [OPTIMIZATION] If signatures match, we can clone the report or just skip
173
+ if (latestData.systemFingerprint === currentSystemHash) {
174
+ logger.log('INFO', `[BuildReporter] ⚡ System Fingerprint (${currentSystemHash.substring(0,8)}) matches latest build. Skipping Report.`);
175
+
176
+ // Create a "Skipped" record for the new version so we know it deployed
177
+ await db.collection('computation_build_records').doc(buildId).set({
178
+ buildId,
179
+ packageVersion,
180
+ systemFingerprint: currentSystemHash,
181
+ status: 'SKIPPED_IDENTICAL',
182
+ referenceBuild: latestData.buildId, // Pointer to the build that actually did the work
183
+ generatedAt: new Date().toISOString()
184
+ });
185
+
186
+ // Release lock and exit
187
+ lockRef.update({ status: 'SKIPPED', completedAt: new Date() }).catch(() => {});
188
+ return;
189
+ }
190
+ }
191
+
192
+ logger.log('INFO', `[BuildReporter] 🚀 Change Detected. Running Pre-flight Report for v${packageVersion}...`);
193
+
194
+ // Pass the fingerprint to generateBuildReport so it can save it
195
+ await generateBuildReport(config, dependencies, manifest, 90, buildId, currentSystemHash);
196
+
153
197
  lockRef.update({ status: 'COMPLETED', completedAt: new Date() }).catch(() => {});
154
198
 
155
199
  } catch (e) {
@@ -160,11 +204,14 @@ async function ensureBuildReport(config, dependencies, manifest) {
160
204
  /**
161
205
  * Generates the report, writes to Firestore, AND FIXES STABLE UPDATES.
162
206
  */
163
- async function generateBuildReport(config, dependencies, manifest, daysBack = 90, customBuildId = null) {
207
+ async function generateBuildReport(config, dependencies, manifest, daysBack = 90, customBuildId = null, systemFingerprint = null) {
164
208
  const { db, logger } = dependencies;
165
209
  const buildId = customBuildId || `manual_${Date.now()}`;
166
210
 
167
- logger.log('INFO', `[BuildReporter] Generating Build Report: ${buildId} (Scope: ${daysBack} days)...`);
211
+ // Calculate fingerprint if not provided (for manual runs)
212
+ const finalFingerprint = systemFingerprint || getSystemFingerprint(manifest);
213
+
214
+ logger.log('INFO', `[BuildReporter] Generating Build Report: ${buildId} (Scope: ${daysBack} days, Fingerprint: ${finalFingerprint.substring(0,8)})...`);
168
215
 
169
216
  const today = new Date();
170
217
  const startDate = new Date();
@@ -188,7 +235,14 @@ async function generateBuildReport(config, dependencies, manifest, daysBack = 90
188
235
  }
189
236
  });
190
237
 
191
- const reportHeader = { buildId, packageVersion, generatedAt: new Date().toISOString(), summary: {}, _sharded: true };
238
+ const reportHeader = {
239
+ buildId,
240
+ packageVersion,
241
+ systemFingerprint: finalFingerprint, // Saved to Firestore
242
+ generatedAt: new Date().toISOString(),
243
+ summary: {},
244
+ _sharded: true
245
+ };
192
246
 
193
247
  let totalRun = 0, totalReRun = 0, totalStable = 0;
194
248
  const detailWrites = [];
@@ -0,0 +1,144 @@
1
/**
 * @fileoverview Analyzes calculation behavior via Monte Carlo simulation
 * to generate "Loose" but mathematically sound contracts.
 */
const SimRunner = require('../simulation/SimRunner');
const { MathPrimitives } = require('../layers/mathematics');
const { normalizeName } = require('../utils/utils');

class ContractDiscoverer {

    /**
     * Runs `iterations` randomized simulations of a calculation and infers a
     * behavioral contract from the observed outputs.
     * @param {object} calcManifest    - Manifest entry (uses .name, .type, .class).
     * @param {object} fullManifestMap - Full manifest map, forwarded to the Fabricator.
     * @param {number} [iterations=50] - Number of simulated "universes" to sample.
     * @returns {Promise<object|null>} Inferred contract, or null if fewer than 5 samples succeeded.
     */
    static async generateContract(calcManifest, fullManifestMap, iterations = 50) {
        console.log(`[ContractDiscoverer] 🕵️‍♀️ Learning behavior for: ${calcManifest.name}`);

        const samples = [];
        const errors = [];

        // 1. Monte Carlo Simulation
        // Run the code against N different "universes" of data to see how it behaves.
        for (let i = 0; i < iterations; i++) {
            try {
                const result = await this._runSimulationRaw(calcManifest, fullManifestMap, i);
                if (result) samples.push(result);
            } catch (e) {
                errors.push(e.message);
            }
        }

        if (samples.length < 5) {
            // [FIXED] Previously the collected simulation errors were silently
            // discarded; surface the first few so the skip is diagnosable.
            const hint = errors.length > 0 ? ` First errors: ${errors.slice(0, 3).join(' | ')}` : '';
            console.warn(`[ContractDiscoverer] ⚠️ Insufficient samples for ${calcManifest.name} (${samples.length}/${iterations} ok, ${errors.length} failed). Skipping.${hint}`);
            return null;
        }

        // 2. Statistical Inference: analyze the outputs to find "Invariants".
        return this._inferContractFromSamples(samples, calcManifest.type);
    }

    /**
     * Bypasses SimRunner's hashing and returns the raw result object of one
     * simulated execution, seeded deterministically by `seed`.
     * @param {object} manifest - Calculation manifest (uses .name, .class).
     * @param {object} map      - Full manifest map for context fabrication.
     * @param {number} seed     - Seed index for the fabricated universe.
     * @returns {Promise<object>} Raw calculation result.
     */
    static async _runSimulationRaw(manifest, map, seed) {
        const Fabricator = require('../simulation/Fabricator');
        const fabricator = new Fabricator(manifest.name + '_seed_' + seed);
        const context = await fabricator.generateContext(manifest, map, seed);
        const instance = new manifest.class();
        await instance.process(context);
        return instance.getResult ? await instance.getResult() : (instance.results || {});
    }

    /**
     * Infers structural, numeric and categorical invariants from the sampled outputs.
     * @param {object[]} samples - Raw results from the Monte Carlo runs.
     * @param {string} type      - Calculation type; 'standard' means batch (userId -> result).
     * @returns {object|null} Contract with requiredKeys/dataTypes/numericBounds/enums/distributions.
     */
    static _inferContractFromSamples(samples, type) {
        // For batch ("standard") calculations each sample is a map of
        // userId -> result; flatten so we analyze "what does A USER result look like?".
        let flattened = samples;
        if (type === 'standard') {
            flattened = [];
            samples.forEach(batch => {
                Object.values(batch).forEach(userResult => flattened.push(userResult));
            });
        }

        if (flattened.length === 0) return null;

        // Initialize Rule Set
        const contract = {
            requiredKeys: new Set(),
            numericBounds: {},   // { min, max }
            distributions: {},   // { mean, stdDev, sigmaLimit }
            enums: {},           // { key: allowedValues[] }
            dataTypes: {}        // { key: 'number' | 'string' | 'object' }
        };

        // A. Structural Analysis (Keys & Types)
        const objects = flattened.filter(s => typeof s === 'object' && s !== null);
        if (objects.length > 0) {
            // [FIXED] The contract was previously inferred from flattened[0] ONLY:
            // optional keys in sample #0 were wrongly marked "required", and keys
            // absent from sample #0 were never analyzed at all. Now a key is
            // required only if it appears in EVERY sample, and the union of all
            // observed keys is analyzed.
            const allKeys = new Set();
            objects.forEach(o => Object.keys(o).forEach(k => allKeys.add(k)));

            allKeys.forEach(key => {
                if (objects.every(o => Object.prototype.hasOwnProperty.call(o, key))) {
                    contract.requiredKeys.add(key);
                }

                // Track all non-null values for this key to find bounds.
                const values = objects.map(item => item[key]).filter(v => v !== null && v !== undefined);
                if (values.length === 0) return;

                contract.dataTypes[key] = typeof values[0];

                // B. Numeric Analysis (The "Power" part)
                if (typeof values[0] === 'number') {
                    this._analyzeNumericField(key, values, contract);
                }

                // C. Categorical Analysis
                if (typeof values[0] === 'string') {
                    const unique = new Set(values);
                    if (unique.size < 10) { // Only a few distinct strings => treat as an Enum
                        contract.enums[key] = Array.from(unique);
                    }
                }
            });
        }

        return {
            ...contract,
            requiredKeys: Array.from(contract.requiredKeys)
        };
    }

    /**
     * Derives hard bounds and a loose statistical envelope for one numeric field,
     * mutating `contract.numericBounds` and `contract.distributions` in place.
     * @param {string} key       - Field name.
     * @param {number[]} values  - All observed values for the field.
     * @param {object} contract  - Contract accumulator being built.
     */
    static _analyzeNumericField(key, values, contract) {
        if (values.length === 0) return;

        // [FIXED] Math.min(...values) spreads the array into arguments and can
        // exceed the engine's argument limit on large batches; reduce instead.
        let min = values[0];
        let max = values[0];
        for (const v of values) {
            if (v < min) min = v;
            if (v > max) max = v;
        }
        const avg = MathPrimitives.average(values);
        const dev = MathPrimitives.standardDeviation(values);

        // 1. Detect "Hard" Physics Limits (Probability, Ratios)
        // If the value NEVER leaves [0, 1] across all runs, assume it's a Ratio.
        // "Financial Volatility" creates large numbers, but "Ratios" stay small.
        const isRatio = (min >= 0 && max <= 1.0);
        const isPercentage = (min >= 0 && max <= 100.0 && max > 1.0); // e.g. RSI
        const isPositive = (min >= 0);

        contract.numericBounds[key] = {
            // No strict upper bound for financial values (Price, Vol, PnL):
            // crypto/finance can do 1000x. Strict bounds only for Ratios/Percentages.
            min: isPositive ? 0 : -Infinity,
            max: (isRatio ? 1.0 : (isPercentage ? 100.0 : Infinity))
        };

        // 2. Detect "Soft" Statistical Envelopes
        // Handles the "Ridiculously Volatile" case: by Chebyshev's inequality a
        // wide sigma envelope holds even for non-normal distributions. A value
        // 20 sigma away is likely a bug (e.g., Unix Timestamp interpreted as Price).
        if (dev > 0) {
            contract.distributions[key] = {
                mean: avg,
                stdDev: dev,
                // "Loose" envelope: 10 standard deviations allowed — permits
                // massive volatility but still catches data corruption.
                sigmaLimit: 10
            };
        }
    }
}

module.exports = ContractDiscoverer;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "bulltrackers-module",
3
- "version": "1.0.282",
3
+ "version": "1.0.284",
4
4
  "description": "Helper Functions for Bulltrackers.",
5
5
  "main": "index.js",
6
6
  "files": [