bulltrackers-module 1.0.293 → 1.0.295

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -120,8 +120,8 @@ class DataExtractor {
120
120
  return "Buy";
121
121
  }
122
122
 
123
- static getLeverage(position) { return position ? (position.Leverage || 1) : 1; }
124
- static getOpenRate(position) { return position ? (position.OpenRate || 0) : 0; }
123
+ static getLeverage(position) { return position ? (position.Leverage || 1) : 1; }
124
+ static getOpenRate(position) { return position ? (position.OpenRate || 0) : 0; }
125
125
  static getCurrentRate(position) { return position ? (position.CurrentRate || 0) : 0; }
126
126
  static getStopLossRate(position) {
127
127
  const rate = position ? (position.StopLossRate || 0) : 0;
@@ -207,7 +207,7 @@ class HistoryExtractor {
207
207
  });
208
208
  }
209
209
  const asset = assetsMap.get(instId);
210
- const open = new Date(t.OpenDateTime);
210
+ const open = new Date(t.OpenDateTime);
211
211
  const close = new Date(t.CloseDateTime);
212
212
  const durationMins = (close - open) / 60000;
213
213
  if (durationMins > 0) {
@@ -287,25 +287,25 @@ class InsightsExtractor {
287
287
  return insights.find(i => i.instrumentId === instrumentId) || null;
288
288
  }
289
289
 
290
- static getTotalOwners(insight) { return insight ? (insight.total || 0) : 0; }
291
- static getLongPercent(insight) { return insight ? (insight.buy || 0) : 0; }
292
- static getShortPercent(insight) { return insight ? (insight.sell || 0) : 0; }
290
+ static getTotalOwners(insight) { return insight ? (insight.total || 0) : 0; }
291
+ static getLongPercent(insight) { return insight ? (insight.buy || 0) : 0; }
292
+ static getShortPercent(insight) { return insight ? (insight.sell || 0) : 0; }
293
293
  static getGrowthPercent(insight) { return insight ? (insight.growth || 0) : 0; }
294
294
 
295
295
  static getLongCount(insight) {
296
- const total = this.getTotalOwners(insight);
296
+ const total = this.getTotalOwners(insight);
297
297
  const buyPct = this.getLongPercent(insight);
298
298
  return Math.floor(total * (buyPct / 100));
299
299
  }
300
300
 
301
301
  static getShortCount(insight) {
302
- const total = this.getTotalOwners(insight);
302
+ const total = this.getTotalOwners(insight);
303
303
  const sellPct = this.getShortPercent(insight);
304
304
  return Math.floor(total * (sellPct / 100));
305
305
  }
306
306
 
307
307
  static getNetOwnershipChange(insight) {
308
- const total = this.getTotalOwners(insight);
308
+ const total = this.getTotalOwners(insight);
309
309
  const growth = this.getGrowthPercent(insight);
310
310
  if (total === 0) return 0;
311
311
  const prevTotal = total / (1 + (growth / 100));
@@ -0,0 +1,93 @@
1
+ # The BullTrackers Computation System: An Advanced DAG-Based Architecture for High-Fidelity Financial Simulation
2
+
3
+ ## Abstract
4
+
5
+ This paper details the design, implementation, and theoretical underpinnings of the BullTrackers Computation System, a proprietary high-performance execution engine designed for complex financial modeling and user behavior analysis. The system leverages a Directed Acyclic Graph (DAG) architecture to orchestrate interdependent calculations, employing Kahn’s Algorithm for topological sorting and Tarjan’s Algorithm for cycle detection. Key innovations include "Content-Based Dependency Short-Circuiting" for massive optimization, a "System Epoch" and "Infrastructure Hash" based auditing system for absolute reproducibility, and a batch-flushing execution model designed to mitigate Out-Of-Memory (OOM) errors during high-volume processing. We further explore the application of this system in running advanced psychometric and risk-geometry models ("Smart Money" scoring) and how the architecture supports self-healing workflows through granular state management.
6
+
7
+ ## 1. Introduction
8
+
9
+ In modern financial analytics, derived data often depends on a complex web of varying input frequencies—real-time price ticks, daily portfolio snapshots, and historical trade logs. Traditional linear batch processing protocols fail to capture the nuances of these interdependencies, often leading to race conditions or redundant computations.
10
+
11
+ The BullTrackers Computation System was devised to solve this by treating the entire domain logic as a **Directed Acyclic Graph (DAG)**. Every calculation is a node, and every data requirement is an edge. By resolving the topology of this graph dynamically at runtime, the system ensures that:
12
+ 1. Data is always available before it is consumed (referential integrity).
13
+ 2. Only necessary computations are executed (efficiency).
14
+ 3. Changes in code or infrastructure propagate deterministically through the graph (auditability).
15
+
16
+ ## 2. Theoretical Foundations
17
+
18
+ The core utility of the system is its ability to turn a collection of loosely coupled JavaScript classes into a strictly ordered execution plan.
19
+
20
+ ### 2.1 Directed Acyclic Graphs (DAGs)
21
+ We model the computation space as a DAG where $G = (V, E)$.
22
+ * **Vertices ($V$)**: Individual Calculation Units (e.g., `NetProfit`, `SmartMoneyScore`).
23
+ * **Edges ($E$)**: Data dependencies, where an edge $(u, v)$ implies $v$ requires the output of $u$.
24
+
25
+ ### 2.2 Topological Sorting (Kahn’s Algorithm)
26
+ To execute the graph, we must linearize it such that for every dependency $u \rightarrow v$, $u$ precedes $v$ in the execution order. We implement **Kahn’s Algorithm** within `ManifestBuilder.js` to achieve this:
27
+ 1. Calculate the **in-degree** (number of incoming edges) for all nodes.
28
+ 2. Initialize a queue with all nodes having an in-degree of 0 (independent nodes).
29
+ 3. While the queue is not empty:
30
+ * Dequeue node $N$ and add it to the `SortedManifest`.
31
+ * For each neighbor $M$ dependent on $N$, decrement $M$'s in-degree.
32
+ * If $M$'s in-degree becomes 0, enqueue $M$.
33
+ 4. This generates a series of "Passes" or "Waves" of execution, allowing parallel processing of independent nodes within the same pass.
34
+
35
+ ### 2.3 Cycle Detection (Tarjan’s Algorithm)
36
+ A critical failure mode in DAGs is the introduction of a cycle (e.g., A needs B, B needs A), effectively turning the DAG into a DCG (Directed Cyclic Graph), which is unresolvable.
37
+ If Kahn’s algorithm fails to visit all nodes (indicating a cycle exists), the system falls back to **Tarjan’s Strongly Connected Components (SCC) Algorithm**. This uses depth-first search to identify the exact cycle chain (e.g., `Calc A -> Calc B -> Calc C -> Calc A`), reporting the "First Cycle Found" to the developer for immediate remediation.
38
+
39
+ ## 3. System Architecture & "Source of Truth"
40
+
41
+ The architecture is centered around the **Manifest**, a dynamic, immutable registry of all capabilities within the system.
42
+
43
+ ### 3.1 The Dynamic Manifest
44
+ Unlike static build tools, the Manifest is built at runtime by `ManifestLoader.js` and `ManifestBuilder.js`. It employs an **Auto-Discovery** mechanism that scans directories for calculation classes.
45
+ * **Static Metadata**: Each class exposes `getMetadata()` and `getDependencies()`.
46
+ * **Product Line Filtering**: The builder can slice the graph, generating a subgraph relevant only to specific product lines (e.g., "Crypto", "Stocks"), reducing overhead.
47
+
48
+ ### 3.2 Granular Hashing & The Audit Chain
49
+ To ensure that "if the code hasn't changed, the result shouldn't change," the system implements a multi-layered hashing strategy (`HashManager.js`):
50
+ 1. **Code Hash**: The raw string content of the calculation class.
51
+ 2. **Layer Hash**: Hashes of shared utility layers (`mathematics`, `profiling`) used by the class.
52
+ 3. **Dependency Hash**: A composite hash of all upstream dependencies.
53
+ 4. **Infrastructure Hash**: A hash representing the underlying system environment.
54
+ 5. **System Epoch**: A manual versioning flag to force global re-computation.
55
+
56
+ This results in a `Composite Hash`. If this hash matches the `storedHash` in the database, execution can be skipped entirely.
57
+
58
+ ## 4. Execution Engine: Flow, Resilience & Optimization
59
+
60
+ The `WorkflowOrchestrator` acts as the runtime kernel, utilizing `StandardExecutor` and `MetaExecutor` for the heavy lifting.
61
+
62
+ ### 4.1 Content-Based Dependency Short-Circuiting
63
+ A major optimization (O(n) gain) is the **Content-Based Short-Circuiting** logic found in `WorkflowOrchestrator.js`:
64
+ Even if an upstream dependency *re-runs* (e.g., its timestamp changed), its *output* might be identical to the previous run.
65
+ 1. The system tracks `ResultHash` (hash of the actual output data).
66
+ 2. When checking dependencies for Node B (which depends on A), if A has re-run but its `ResultHash` is unchanged from what B used last time, B **does not need to re-run**.
67
+ 3. This effectively stops "change propagation" dead in its tracks if the data change is semantically null.
68
+
69
+ ### 4.2 Batch Flushing & OOM Prevention
70
+ Financial datasets (processing 100k+ users with daily portfolios) often exceed Node.js heap limits. The `StandardExecutor` implements a **Streaming & Flushing** architecture:
71
+ * **Streams** inputs (Portfolio/History) using generators (`yield`), preventing loading all users into memory.
72
+ * **Buffers** results in a `state` object.
73
+ * **Flushes** to the database (Firestore/Storage) every $N$ users (e.g., 5000), clearing the internal buffer to avoid Out-Of-Memory crashes.
74
+ * **Incremental Sharding**: It manages shard indices dynamically to split massive result sets into retrievable chunks.
75
+
76
+ ### 4.3 Handling "Impossible" States
77
+ If a dependency fails or is missing critical data, the Orchestrator marks dependent nodes as `IMPOSSIBLE` rather than failing them. This allows the rest of the graph (independent branches) to continue execution, maximizing system throughput even in a partially degraded state.
78
+
79
+ ## 5. Advanced Application: Psychometrics & Risk Geometry
80
+
81
+ The capabilities of this computation engine are best demonstrated by the `profiling.js` layer it powers. Because the DAG ensures all historical and portfolio data is perfectly aligned, we can run sophisticated O(n^2) or O(n log n) algorithms on user data reliably.
82
+
83
+ ### 5.1 "Smart Money" & Cognitive Profiling
84
+ The system executes a `UserClassifier` that computes:
85
+ * **Risk Geometry**: Using the **Monotone Chain** algorithm to compute the Convex Hull of a user's risk/reward performance (Efficient Frontier analysis).
86
+ * **Psychometrics**: Detecting "Revenge Trading" (increasing risk after losses) and "Disposition Skew" (holding losers too long).
87
+ * **Attribution**: Separating "Luck" (market beta) from "Skill" (Alpha) by comparing performance against sector benchmarks.
88
+
89
+ These complex models depend on the *guarantee* provided by the DAG that all necessary history and price data is pre-computed and available in the `Context`.
90
+
91
+ ## 6. Conclusion
92
+
93
+ The BullTrackers Computation System represents a shift from "Action-Based" to "State-Based" architecture. By encoding the domain logic into a Directed Acyclic Graph, we achieve a system that is self-healing, massively scalable via short-circuiting and batching, and capable of supporting deep analytical models. It provides the robustness required for high-stakes financial simulation, ensuring that every decimal point is traceable, reproducible, and verifiable.
@@ -43,14 +43,14 @@ async function recordRunAttempt(db, context, status, error = null, detailedMetri
43
43
  const timings = rawExecStats.timings || {};
44
44
 
45
45
  const runEntry = {
46
- runId: runId,
46
+ runId: runId,
47
47
  computationName: computation,
48
- pass: String(pass),
49
- workerId: workerId,
50
- targetDate: targetDate,
51
- triggerTime: now.toISOString(),
52
- durationMs: detailedMetrics.durationMs || 0,
53
- status: status,
48
+ pass: String(pass),
49
+ workerId: workerId,
50
+ targetDate: targetDate,
51
+ triggerTime: now.toISOString(),
52
+ durationMs: detailedMetrics.durationMs || 0,
53
+ status: status,
54
54
 
55
55
  // [NEW] Trigger Context
56
56
  trigger: {
@@ -60,12 +60,12 @@ async function recordRunAttempt(db, context, status, error = null, detailedMetri
60
60
 
61
61
  // [IDEA 2] Enhanced Execution Stats
62
62
  executionStats: {
63
- processedUsers: rawExecStats.processedUsers || 0,
64
- skippedUsers: rawExecStats.skippedUsers || 0,
63
+ processedUsers: rawExecStats.processedUsers || 0,
64
+ skippedUsers: rawExecStats.skippedUsers || 0,
65
65
  // Explicitly break out timings for BigQuery/Analysis
66
66
  timings: {
67
- setupMs: Math.round(timings.setup || 0),
68
- streamMs: Math.round(timings.stream || 0),
67
+ setupMs: Math.round(timings.setup || 0),
68
+ streamMs: Math.round(timings.stream || 0),
69
69
  processingMs: Math.round(timings.processing || 0)
70
70
  }
71
71
  },
@@ -73,8 +73,8 @@ async function recordRunAttempt(db, context, status, error = null, detailedMetri
73
73
  outputStats: {
74
74
  sizeMB: sizeMB,
75
75
  isSharded: !!detailedMetrics.storage?.isSharded,
76
- shardCount: detailedMetrics.storage?.shardCount || 1,
77
- keysWritten: detailedMetrics.storage?.keys || 0
76
+ shardCount: detailedMetrics.storage?.shardCount || 1,
77
+ keysWritten: detailedMetrics.storage?.keys || 0
78
78
  },
79
79
 
80
80
  anomalies: anomalies,
@@ -84,9 +84,9 @@ async function recordRunAttempt(db, context, status, error = null, detailedMetri
84
84
  if (error) {
85
85
  runEntry.error = {
86
86
  message: error.message || 'Unknown Error',
87
- stage: error.stage || 'UNKNOWN',
88
- stack: error.stack ? error.stack.substring(0, 1000) : null,
89
- code: error.code || null
87
+ stage: error.stage || 'UNKNOWN',
88
+ stack: error.stack ? error.stack.substring(0, 1000) : null,
89
+ code: error.code || null
90
90
  };
91
91
  }
92
92
 
@@ -0,0 +1,233 @@
1
+ /**
2
+ * @fileoverview Admin API Router
3
+ * Sub-module for system observability, debugging, and visualization.
4
+ * Mounted at /admin within the Generic API.
5
+ */
6
+
7
+ const express = require('express');
8
+ const pLimit = require('p-limit');
9
+ const { getManifest } = require('../../computation-system/topology/ManifestLoader');
10
+ const { normalizeName } = require('../../computation-system/utils/utils');
11
+
12
+ /**
13
+ * Factory to create the Admin Router.
14
+ * @param {object} config - System configuration.
15
+ * @param {object} dependencies - { db, logger, ... }
16
+ * @param {object} unifiedCalculations - The injected calculations package.
17
+ */
18
+ const createAdminRouter = (config, dependencies, unifiedCalculations) => {
19
+ const router = express.Router();
20
+ const { db, logger } = dependencies;
21
+
22
+ // --- 1. TOPOLOGY VISUALIZER ---
23
+ // Returns nodes/edges for React Flow or Cytoscape
24
+ router.get('/topology', async (req, res) => {
25
+ try {
26
+ // Build manifest using the INJECTED calculations object
27
+ // Passing [] for productLines ensures we get the FULL graph
28
+ const manifest = getManifest([], unifiedCalculations, dependencies);
29
+
30
+ const nodes = [];
31
+ const edges = [];
32
+
33
+ manifest.forEach(calc => {
34
+ // Nodes
35
+ nodes.push({
36
+ id: calc.name,
37
+ data: {
38
+ label: calc.name,
39
+ layer: calc.category,
40
+ pass: calc.pass, // Visualization can group columns by Pass
41
+ isHistorical: calc.isHistorical,
42
+ type: calc.type
43
+ },
44
+ position: { x: 0, y: 0 } // Frontend handles layout (e.g. Dagre)
45
+ });
46
+
47
+ // Dependency Edges (Calc -> Calc)
48
+ if (calc.dependencies) {
49
+ calc.dependencies.forEach(dep => {
50
+ edges.push({
51
+ id: `e-${dep}-${calc.name}`,
52
+ source: normalizeName(dep),
53
+ target: calc.name,
54
+ type: 'default',
55
+ animated: false
56
+ });
57
+ });
58
+ }
59
+
60
+ // Root Data Edges (Data -> Calc)
61
+ if (calc.rootDataDependencies) {
62
+ calc.rootDataDependencies.forEach(root => {
63
+ // Ensure a node exists for the root data type
64
+ const rootId = `ROOT_${root.toUpperCase()}`;
65
+ if (!nodes.find(n => n.id === rootId)) {
66
+ nodes.push({
67
+ id: rootId,
68
+ type: 'input', // Special React Flow type
69
+ data: { label: `${root.toUpperCase()} DB` },
70
+ position: { x: 0, y: 0 }
71
+ });
72
+ }
73
+
74
+ edges.push({
75
+ id: `e-root-${root}-${calc.name}`,
76
+ source: rootId,
77
+ target: calc.name,
78
+ animated: true,
79
+ style: { stroke: '#ff0072' } // Highlight data flow
80
+ });
81
+ });
82
+ }
83
+ });
84
+
85
+ res.json({
86
+ summary: {
87
+ totalNodes: nodes.length,
88
+ totalEdges: edges.length
89
+ },
90
+ nodes,
91
+ edges
92
+ });
93
+ } catch (e) {
94
+ logger.log('ERROR', '[AdminAPI] Topology build failed', e);
95
+ res.status(500).json({ error: e.message });
96
+ }
97
+ });
98
+
99
+ // --- 2. STATUS MATRIX (Calendar View) ---
100
+ // ?start=2023-01-01&end=2023-01-30
101
+ router.get('/matrix', async (req, res) => {
102
+ const { start, end } = req.query;
103
+ if (!start || !end) return res.status(400).json({ error: "Start (YYYY-MM-DD) and End dates required." });
104
+
105
+ try {
106
+ const startDate = new Date(start);
107
+ const endDate = new Date(end);
108
+ const dates = [];
109
+
110
+ // Generate date range
111
+ for (let d = new Date(startDate); d <= endDate; d.setDate(d.getDate() + 1)) {
112
+ dates.push(d.toISOString().slice(0, 10));
113
+ }
114
+
115
+ const limit = pLimit(20); // Concurrent Firestore reads
116
+ const matrix = {};
117
+
118
+ await Promise.all(dates.map(date => limit(async () => {
119
+ // Fetch Global Status and Root Data Availability
120
+ const [statusSnap, rootSnap] = await Promise.all([
121
+ db.collection('computation_status').doc(date).get(),
122
+ db.collection('system_root_data_index').doc(date).get()
123
+ ]);
124
+
125
+ // Flatten status for frontend (calcName -> { status: 'COMPLETED' | 'IMPOSSIBLE' })
126
+ const statusData = statusSnap.exists ? statusSnap.data() : {};
127
+ const rootData = rootSnap.exists ? rootSnap.data() : { status: { hasPortfolio: false } };
128
+
129
+ // Clean up status map
130
+ const cleanStatus = {};
131
+ Object.keys(statusData).forEach(key => {
132
+ const entry = statusData[key];
133
+ if (typeof entry === 'object') {
134
+ if (entry.hash && entry.hash.startsWith('IMPOSSIBLE')) cleanStatus[key] = 'IMPOSSIBLE';
135
+ else cleanStatus[key] = 'COMPLETED';
136
+ } else if (entry === 'IMPOSSIBLE') {
137
+ cleanStatus[key] = 'IMPOSSIBLE';
138
+ } else if (entry === true || typeof entry === 'string') {
139
+ cleanStatus[key] = 'COMPLETED';
140
+ }
141
+ });
142
+
143
+ matrix[date] = {
144
+ dataAvailable: rootData.status || {}, // e.g. { hasPortfolio: true }
145
+ calculations: cleanStatus
146
+ };
147
+ })));
148
+
149
+ res.json(matrix);
150
+ } catch (e) {
151
+ logger.log('ERROR', '[AdminAPI] Matrix fetch failed', e);
152
+ res.status(500).json({ error: e.message });
153
+ }
154
+ });
155
+
156
+ // --- 3. FLIGHT RECORDER (Inspection) ---
157
+ // Look up execution details for a specific Calc + Date
158
+ router.get('/inspect/:date/:calcName', async (req, res) => {
159
+ const { date, calcName } = req.params;
160
+ try {
161
+ // We search across all potential passes (1-5) because we might not know which one it belongs to
162
+ const passes = ['1', '2', '3', '4', '5'];
163
+ let executionRecord = null;
164
+
165
+ // Run in parallel to find the record fast
166
+ await Promise.all(passes.map(async (pass) => {
167
+ if (executionRecord) return; // Optimization
168
+ const ref = db.doc(`computation_audit_ledger/${date}/passes/${pass}/tasks/${calcName}`);
169
+ const snap = await ref.get();
170
+ if (snap.exists) {
171
+ executionRecord = { pass, ...snap.data() };
172
+ }
173
+ }));
174
+
175
+ if (!executionRecord) {
176
+ return res.status(404).json({
177
+ status: 'NOT_FOUND',
178
+ message: `No execution record found in ledger for ${calcName} on ${date}`
179
+ });
180
+ }
181
+
182
+ // Also fetch the "Contract" if it exists (for volatility analysis)
183
+ const contractSnap = await db.collection('system_contracts').doc(calcName).get();
184
+
185
+ res.json({
186
+ execution: executionRecord,
187
+ contract: contractSnap.exists ? contractSnap.data() : null
188
+ });
189
+
190
+ } catch (e) {
191
+ logger.log('ERROR', `[AdminAPI] Inspect failed for ${calcName}`, e);
192
+ res.status(500).json({ error: e.message });
193
+ }
194
+ });
195
+
196
+ // --- 4. ANOMALY DETECTOR ---
197
+ // Finds recent crashes and chronic failures
198
+ router.get('/anomalies', async (req, res) => {
199
+ try {
200
+ const [dlqSnap, statsSnap] = await Promise.all([
201
+ db.collection('computation_dead_letter_queue').orderBy('finalAttemptAt', 'desc').limit(50).get(),
202
+ db.collection('computation_audit_logs').orderBy('failureCount', 'desc').limit(20).get()
203
+ ]);
204
+
205
+ const recentCrashes = [];
206
+ dlqSnap.forEach(doc => recentCrashes.push({ id: doc.id, ...doc.data() }));
207
+
208
+ const chronicFailures = [];
209
+ statsSnap.forEach(doc => {
210
+ const d = doc.data();
211
+ if (d.failureCount > 0) {
212
+ chronicFailures.push({
213
+ computation: doc.id,
214
+ failures: d.failureCount,
215
+ successes: d.successCount || 0,
216
+ lastError: d.lastRunStatus
217
+ });
218
+ }
219
+ });
220
+
221
+ res.json({
222
+ recentCrashes,
223
+ chronicFailures
224
+ });
225
+ } catch (e) {
226
+ res.status(500).json({ error: e.message });
227
+ }
228
+ });
229
+
230
+ return router;
231
+ };
232
+
233
+ module.exports = createAdminRouter;
@@ -1,10 +1,31 @@
1
1
  /**
2
2
  * @fileoverview API sub-pipes.
3
- * REFACTORED: Fixed Category Resolution to match ManifestBuilder logic.
4
- * Implements Status-Based Availability Caching and Smart Date Resolution.
3
+ * REFACTORED: API V3 - Status-Aware Data Fetching.
4
+ * UPDATED: Added GZIP Decompression support for fetching compressed results.
5
5
  */
6
6
 
7
7
  const { FieldPath } = require('@google-cloud/firestore');
8
+ const zlib = require('zlib'); // [NEW] Required for decompression
9
+
10
+ // --- HELPER: DECOMPRESSION ---
11
+ /**
12
+ * Checks if data is compressed and inflates it if necessary.
13
+ * @param {object} data - The raw Firestore document data.
14
+ * @returns {object} The original (decompressed) JSON object.
15
+ */
16
+ function tryDecompress(data) {
17
+ if (data && data._compressed === true && data.payload) {
18
+ try {
19
+ // Firestore returns Buffers automatically for Blob types
20
+ return JSON.parse(zlib.gunzipSync(data.payload).toString());
21
+ } catch (e) {
22
+ console.error('[API] Decompression failed:', e);
23
+ // Return empty object or original data on failure to avoid crashing response
24
+ return {};
25
+ }
26
+ }
27
+ return data;
28
+ }
8
29
 
9
30
  // --- AVAILABILITY CACHE ---
10
31
  class AvailabilityCache {
@@ -147,6 +168,7 @@ const buildCalculationMap = (unifiedCalculations) => {
147
168
 
148
169
  /**
149
170
  * Sub-pipe: pipe.api.helpers.fetchUnifiedData
171
+ * UPDATED: Uses tryDecompress to handle compressed payloads.
150
172
  */
151
173
  const fetchUnifiedData = async (config, dependencies, calcKeys, dateStrings, calcMap) => {
152
174
  const { db, logger } = dependencies;
@@ -191,7 +213,8 @@ const fetchUnifiedData = async (config, dependencies, calcKeys, dateStrings, cal
191
213
  snapshots.forEach((doc, idx) => {
192
214
  const { date, key } = chunk[idx];
193
215
  if (doc.exists) {
194
- response[date][key] = doc.data();
216
+ // [UPDATED] Decompress data if needed
217
+ response[date][key] = tryDecompress(doc.data());
195
218
  } else {
196
219
  response[date][key] = null;
197
220
  }
@@ -321,8 +344,11 @@ async function getComputationStructure(computationName, calcMap, config, depende
321
344
  .collection(compsSub).doc(computationName);
322
345
  const doc = await docRef.get();
323
346
  if (!doc.exists) { return { status: 'error', computation: computationName, message: `Summary flag was present for ${latestStoredDate} but doc is missing.` }; }
324
- const fullData = doc.data();
347
+
348
+ // [UPDATED] Decompress data for structure inspection
349
+ const fullData = tryDecompress(doc.data());
325
350
  const structureSnippet = createStructureSnippet(fullData);
351
+
326
352
  return { status: 'success', computation: computationName, category: category, latestStoredDate: latestStoredDate, structureSnippet: structureSnippet, };
327
353
  } catch (error) {
328
354
  logger.log('ERROR', `API /structure/${computationName} helper failed.`, { errorMessage: error.message });
@@ -2,12 +2,16 @@
2
2
  * @fileoverview Main entry point for the Generic API module.
3
3
  * Export the 'createApiApp' main pipe function.
4
4
  * REFACTORED: API V3 - Status-Aware Data Fetching.
5
+ * UPDATED: Added Admin API Mount.
5
6
  */
6
7
 
7
8
  const express = require('express');
8
9
  const cors = require('cors');
9
10
  const { buildCalculationMap, createApiHandler, getComputationStructure, createManifestHandler, getDynamicSchema } = require('./helpers/api_helpers.js');
10
11
 
12
+ // [NEW] Import Admin Router
13
+ const createAdminRouter = require('./admin-api/index');
14
+
11
15
  /**
12
16
  * In-Memory Cache Handler
13
17
  * Wrapper that adds TTL cache to GET requests.
@@ -71,8 +75,11 @@ function createApiApp(config, dependencies, unifiedCalculations) {
71
75
  app.use(cors({ origin: true }));
72
76
  app.use(express.json());
73
77
 
78
+ // --- [NEW] MOUNT ADMIN API ---
79
+ // This injects the dependencies and the calculations package into the admin router
80
+ app.use('/admin', createAdminRouter(config, dependencies, unifiedCalculations));
81
+
74
82
  // --- Main API V3 Endpoint ---
75
- // createApiHandler now initializes the AvailabilityCache internally
76
83
  const originalApiHandler = createApiHandler(config, dependencies, calcMap);
77
84
  const cachedApiHandler = createCacheHandler(originalApiHandler, dependencies);
78
85
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "bulltrackers-module",
3
- "version": "1.0.293",
3
+ "version": "1.0.295",
4
4
  "description": "Helper Functions for Bulltrackers.",
5
5
  "main": "index.js",
6
6
  "files": [