bulltrackers-module 1.0.295 → 1.0.297

@@ -1,32 +1,21 @@
 /**
 * @fileoverview Handles saving computation results with observability and Smart Cleanup.
- * UPDATED: Implements GZIP Compression for efficient storage.
- * UPDATED: Implements Content-Based Hashing (ResultHash) for dependency short-circuiting.
- * UPDATED: Auto-enforces Weekend Mode validation.
- * UPDATED: Implements "Initial Write" logic to wipe stale data/shards on a fresh run.
- * UPDATED: Implements "Contract Validation" (Semantic Gates) to block logical violations.
- * OPTIMIZED: Fetches pre-calculated 'simHash' from Registry (removes expensive simulation step).
+ * UPDATED: Tracks specific Firestore Ops (Writes/Deletes) for cost analysis.
 */
 const { commitBatchInChunks, generateDataHash } = require('../utils/utils');
 const { updateComputationStatus } = require('./StatusRepository');
 const { batchStoreSchemas } = require('../utils/schema_capture');
 const { generateProcessId, PROCESS_TYPES } = require('../logger/logger');
 const { HeuristicValidator } = require('./ResultsValidator');
- const ContractValidator = require('./ContractValidator'); // [NEW]
+ const ContractValidator = require('./ContractValidator');
 const validationOverrides = require('../config/validation_overrides');
 const pLimit = require('p-limit');
 const zlib = require('zlib');
 
- const NON_RETRYABLE_ERRORS = [
- 'PERMISSION_DENIED', 'DATA_LOSS', 'FAILED_PRECONDITION'
- ];
-
+ const NON_RETRYABLE_ERRORS = [ 'PERMISSION_DENIED', 'DATA_LOSS', 'FAILED_PRECONDITION' ];
 const SIMHASH_REGISTRY_COLLECTION = 'system_simhash_registry';
- const CONTRACTS_COLLECTION = 'system_contracts'; // [NEW]
+ const CONTRACTS_COLLECTION = 'system_contracts';
 
- /**
- * Commits results to Firestore.
- */
 async function commitResults(stateObj, dStr, passName, config, deps, skipStatusWrite = false, options = {}) {
 const successUpdates = {};
 const failureReport = [];
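The removed header bullets describe features that still live in the module body below (GZIP compression, ResultHash short-circuiting, initial-write wipes, semantic gates); only the Firestore-ops line is genuinely new. For context on the compression path, a minimal sketch of the blob round-trip the module implies — the reader half is hypothetical, not part of this package:

const zlib = require('zlib');

// Writer side (mirrors writeSingleResult): serialize, gzip, store as a Buffer field.
function buildCompressedPayload(result) {
  const raw = Buffer.from(JSON.stringify(result));
  return {
    _compressed: true,
    _completed: true,
    _lastUpdated: new Date().toISOString(),
    payload: zlib.gzipSync(raw) // the Admin SDK stores Buffers as Firestore Bytes
  };
}

// Hypothetical reader side: detect the flag and inflate.
function readStoredResult(docData) {
  if (docData && docData._compressed) {
    return JSON.parse(zlib.gunzipSync(docData.payload).toString('utf8'));
  }
  return docData; // uncompressed documents pass through unchanged
}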
@@ -35,17 +24,12 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
 const { logger, db } = deps;
 const pid = generateProcessId(PROCESS_TYPES.STORAGE, passName, dStr);
 
- // Options defaults
 const flushMode = options.flushMode || 'STANDARD';
 const isInitialWrite = options.isInitialWrite === true;
 const shardIndexes = options.shardIndexes || {};
 const nextShardIndexes = {};
-
- const fanOutLimit = pLimit(10);
-
- // [NEW] Bulk fetch contracts for all calcs in this batch to minimize latency
- // This prevents N+1 reads during the loop
- const contractMap = await fetchContracts(db, Object.keys(stateObj));
+ const fanOutLimit = pLimit(10);
+ const contractMap = await fetchContracts(db, Object.keys(stateObj));
 
 for (const name in stateObj) {
 const calc = stateObj[name];
@@ -55,43 +39,34 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
 const runMetrics = {
 storage: { sizeBytes: 0, isSharded: false, shardCount: 1, keys: 0 },
 validation: { isValid: true, anomalies: [] },
- execution: execStats
+ execution: execStats,
+ // [NEW] Track Ops
+ io: { writes: 0, deletes: 0 }
 };
 
 try {
 const result = await calc.getResult();
- const configOverrides = validationOverrides[calc.manifest.name] || {};
 
+ const configOverrides = validationOverrides[calc.manifest.name] || {};
 const dataDeps = calc.manifest.rootDataDependencies || [];
 const isPriceOnly = (dataDeps.length === 1 && dataDeps[0] === 'price');
-
 let effectiveOverrides = { ...configOverrides };
-
 if (isPriceOnly && !effectiveOverrides.weekend) {
- effectiveOverrides.weekend = {
- maxZeroPct: 100,
- maxFlatlinePct: 100,
- maxNullPct: 100
- };
+ effectiveOverrides.weekend = { maxZeroPct: 100, maxFlatlinePct: 100, maxNullPct: 100 };
 }
 
- // 1. SEMANTIC GATE (CONTRACT VALIDATION) [NEW]
- // We run this BEFORE Heuristics because it catches "Logic Bugs" vs "Data Noise"
 const contract = contractMap[name];
 if (contract) {
 const contractCheck = ContractValidator.validate(result, contract);
 if (!contractCheck.valid) {
- // STOP THE CASCADE: Fail this specific calculation
 runMetrics.validation.isValid = false;
 runMetrics.validation.anomalies.push(contractCheck.reason);
-
 const semanticError = new Error(contractCheck.reason);
 semanticError.stage = 'SEMANTIC_GATE';
 throw semanticError;
 }
 }
 
- // 2. HEURISTIC VALIDATION (Data Integrity)
 if (result && Object.keys(result).length > 0) {
 const healthCheck = HeuristicValidator.analyze(calc.manifest.name, result, dStr, effectiveOverrides);
 if (!healthCheck.valid) {
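ContractValidator itself is not in this diff; the call site only requires that validate(result, contract) return { valid, reason }. A minimal sketch under assumed contract fields — minKeys and requiredFields are illustrative, the real schema lives in the system_contracts collection:

// Hypothetical validator shape inferred from the call site above.
const ContractValidator = {
  validate(result, contract) {
    const keys = Object.keys(result || {});
    if (contract.minKeys != null && keys.length < contract.minKeys) {
      return { valid: false, reason: `Contract violation: expected >= ${contract.minKeys} keys, got ${keys.length}` };
    }
    for (const field of contract.requiredFields || []) {
      if (!(field in (result || {}))) {
        return { valid: false, reason: `Contract violation: missing required field '${field}'` };
      }
    }
    return { valid: true, reason: null };
  }
};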
@@ -102,38 +77,25 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
 throw validationError;
 }
 }
-
+
 const isEmpty = !result || (typeof result === 'object' && Object.keys(result).length === 0);
 const resultHash = isEmpty ? 'empty' : generateDataHash(result);
-
- // [OPTIMIZATION] FETCH SimHash from Registry (Do NOT Calculate)
+
 let simHash = null;
 if (calc.manifest.hash && flushMode !== 'INTERMEDIATE') {
- try {
+ try {
 const regDoc = await db.collection(SIMHASH_REGISTRY_COLLECTION).doc(calc.manifest.hash).get();
- if (regDoc.exists) {
- simHash = regDoc.data().simHash;
- } else {
- logger.log('WARN', `[ResultCommitter] SimHash not found in registry for ${name}.`);
- }
- } catch (regErr) {
- logger.log('WARN', `[ResultCommitter] Failed to read SimHash registry: ${regErr.message}`);
- }
+ if (regDoc.exists) simHash = regDoc.data().simHash;
+ } catch (e) {}
 }
 
 if (isEmpty) {
- if (flushMode === 'INTERMEDIATE') {
- nextShardIndexes[name] = currentShardIndex;
- continue;
- }
+ if (flushMode === 'INTERMEDIATE') { nextShardIndexes[name] = currentShardIndex; continue; }
 if (calc.manifest.hash) {
 successUpdates[name] = {
- hash: calc.manifest.hash,
- simHash: simHash,
- resultHash: resultHash,
+ hash: calc.manifest.hash, simHash: simHash, resultHash: resultHash,
 dependencyResultHashes: calc.manifest.dependencyResultHashes || {},
- category: calc.manifest.category,
- composition: calc.manifest.composition,
+ category: calc.manifest.category, composition: calc.manifest.composition,
 metrics: runMetrics
 };
 }
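resultHash is what enables the dependency short-circuiting named in the old header: a downstream calculation records the hashes of its inputs (dependencyResultHashes) and can skip recomputation when every producer's current resultHash still matches. A consumer-side sketch — the helper and the statusDocs shape are assumptions, not code from this package:

// Hypothetical: statusDocs maps producer name -> the record commitResults stores.
function canShortCircuit(manifest, statusDocs) {
  const expected = manifest.dependencyResultHashes || {};
  return Object.entries(expected).every(([dep, hash]) =>
    statusDocs[dep] && statusDocs[dep].resultHash === hash // nothing upstream changed
  );
}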
@@ -141,7 +103,6 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
 }
 
 if (typeof result === 'object') runMetrics.storage.keys = Object.keys(result).length;
-
 const resultKeys = Object.keys(result || {});
 const isMultiDate = resultKeys.length > 0 && resultKeys.every(k => /^\d{4}-\d{2}-\d{2}$/.test(k));
 
@@ -149,73 +110,42 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
 const datePromises = resultKeys.map((historicalDate) => fanOutLimit(async () => {
 const dailyData = result[historicalDate];
 if (!dailyData || Object.keys(dailyData).length === 0) return;
-
- const historicalDocRef = db.collection(config.resultsCollection)
- .doc(historicalDate)
- .collection(config.resultsSubcollection)
- .doc(calc.manifest.category)
- .collection(config.computationsSubcollection)
- .doc(name);
-
- await writeSingleResult(dailyData, historicalDocRef, name, historicalDate, logger, config, deps, 0, 'STANDARD', false);
+ const historicalDocRef = db.collection(config.resultsCollection).doc(historicalDate).collection(config.resultsSubcollection).doc(calc.manifest.category).collection(config.computationsSubcollection).doc(name);
+ const stats = await writeSingleResult(dailyData, historicalDocRef, name, historicalDate, logger, config, deps, 0, 'STANDARD', false);
+
+ // Aggregate IO Ops
+ runMetrics.io.writes += stats.opCounts.writes;
+ runMetrics.io.deletes += stats.opCounts.deletes;
 }));
 await Promise.all(datePromises);
 
- if (calc.manifest.hash) {
- successUpdates[name] = {
- hash: calc.manifest.hash,
- simHash: simHash,
- resultHash: resultHash,
- dependencyResultHashes: calc.manifest.dependencyResultHashes || {},
- category: calc.manifest.category,
- composition: calc.manifest.composition,
- metrics: runMetrics
- };
- }
-
+ if (calc.manifest.hash) { successUpdates[name] = { hash: calc.manifest.hash, simHash, resultHash, dependencyResultHashes: calc.manifest.dependencyResultHashes || {}, category: calc.manifest.category, composition: calc.manifest.composition, metrics: runMetrics }; }
 } else {
- const mainDocRef = db.collection(config.resultsCollection)
- .doc(dStr)
- .collection(config.resultsSubcollection)
- .doc(calc.manifest.category)
- .collection(config.computationsSubcollection)
- .doc(name);
-
+ const mainDocRef = db.collection(config.resultsCollection).doc(dStr).collection(config.resultsSubcollection).doc(calc.manifest.category).collection(config.computationsSubcollection).doc(name);
 const writeStats = await writeSingleResult(result, mainDocRef, name, dStr, logger, config, deps, currentShardIndex, flushMode, isInitialWrite);
 
 runMetrics.storage.sizeBytes = writeStats.totalSize;
 runMetrics.storage.isSharded = writeStats.isSharded;
 runMetrics.storage.shardCount = writeStats.shardCount;
+ runMetrics.io.writes += writeStats.opCounts.writes;
+ runMetrics.io.deletes += writeStats.opCounts.deletes;
 
 nextShardIndexes[name] = writeStats.nextShardIndex;
-
- if (calc.manifest.hash) {
- successUpdates[name] = {
- hash: calc.manifest.hash,
- simHash: simHash,
- resultHash: resultHash,
- dependencyResultHashes: calc.manifest.dependencyResultHashes || {},
- category: calc.manifest.category,
- composition: calc.manifest.composition,
- metrics: runMetrics
- };
- }
+ if (calc.manifest.hash) { successUpdates[name] = { hash: calc.manifest.hash, simHash, resultHash, dependencyResultHashes: calc.manifest.dependencyResultHashes || {}, category: calc.manifest.category, composition: calc.manifest.composition, metrics: runMetrics }; }
 }
 
 if (calc.manifest.class.getSchema && flushMode !== 'INTERMEDIATE') {
 const { class: _cls, ...safeMetadata } = calc.manifest;
 schemas.push({ name, category: calc.manifest.category, schema: calc.manifest.class.getSchema(), metadata: safeMetadata });
 }
-
 if (calc.manifest.previousCategory && calc.manifest.previousCategory !== calc.manifest.category && flushMode !== 'INTERMEDIATE') {
 cleanupTasks.push(deleteOldCalculationData(dStr, calc.manifest.previousCategory, name, config, deps));
 }
 
 } catch (e) {
 const stage = e.stage || 'EXECUTION';
- const msg = e.message || 'Unknown error';
 if (logger && logger.log) { logger.log('ERROR', `Commit failed for ${name} [${stage}]`, { processId: pid, error: e }); }
- failureReport.push({ name, error: { message: msg, stack: e.stack, stage }, metrics: runMetrics });
+ failureReport.push({ name, error: { message: e.message, stack: e.stack, stage }, metrics: runMetrics });
 }
 }
 
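The multi-date branch fans out one writeSingleResult per historical date, with p-limit capping concurrency at 10 so a large backfill cannot open hundreds of Firestore batches at once. The pattern in isolation (illustrative wrapper, same API the module uses):

const pLimit = require('p-limit');

async function fanOut(dates, writeFn) {
  const limit = pLimit(10); // at most 10 writes in flight at any moment
  // Every task is scheduled up front, but p-limit gates actual execution.
  await Promise.all(dates.map(d => limit(() => writeFn(d))));
}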
@@ -228,48 +158,34 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
 return { successUpdates, failureReport, shardIndexes: nextShardIndexes };
 }
 
- /**
- * [NEW] Helper to fetch contracts for a list of calculations
- */
 async function fetchContracts(db, calcNames) {
 if (!calcNames || calcNames.length === 0) return {};
 const map = {};
-
- // In a high-throughput system, we might cache these in memory (LRU)
- // For now, we fetch from Firestore efficiently.
 const refs = calcNames.map(name => db.collection(CONTRACTS_COLLECTION).doc(name));
-
 try {
 const snaps = await db.getAll(...refs);
- snaps.forEach(snap => {
- if (snap.exists) {
- map[snap.id] = snap.data();
- }
- });
- } catch (e) {
- console.warn(`[ResultCommitter] Failed to fetch contracts batch: ${e.message}`);
- }
+ snaps.forEach(snap => { if (snap.exists) map[snap.id] = snap.data(); });
+ } catch (e) {}
 return map;
 }
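A comment removed above noted that contracts could be cached in memory (LRU) in a high-throughput system rather than re-read on every commit. A sketch of that optimization layered over the same db.getAll batch read — the Map-based cache and TTL are assumptions, not part of the package:

// Hypothetical cache in front of fetchContracts; reuses CONTRACTS_COLLECTION.
const contractCache = new Map(); // name -> { data, fetchedAt }
const CACHE_TTL_MS = 5 * 60 * 1000;

async function fetchContractsCached(db, calcNames) {
  const now = Date.now();
  const misses = calcNames.filter(n => {
    const hit = contractCache.get(n);
    return !hit || (now - hit.fetchedAt) > CACHE_TTL_MS;
  });
  if (misses.length > 0) {
    const refs = misses.map(n => db.collection(CONTRACTS_COLLECTION).doc(n));
    const snaps = await db.getAll(...refs); // still one RPC for the whole batch
    snaps.forEach(s => contractCache.set(s.id, { data: s.exists ? s.data() : null, fetchedAt: now }));
  }
  const map = {};
  for (const n of calcNames) {
    const hit = contractCache.get(n);
    if (hit && hit.data) map[n] = hit.data;
  }
  return map;
}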
 
 async function writeSingleResult(result, docRef, name, dateContext, logger, config, deps, startShardIndex = 0, flushMode = 'STANDARD', isInitialWrite = false) {
-
- // Transition & Cleanup Logic
+ const opCounts = { writes: 0, deletes: 0 };
 let wasSharded = false;
 let shouldWipeShards = false;
-
- // Default: Merge updates. But if Initial Write, overwrite (merge: false) to clear stale fields.
 let rootMergeOption = !isInitialWrite;
 
 if (isInitialWrite) {
 try {
 const currentSnap = await docRef.get();
+ // Note: Reads tracked implicitly by calling code or approximated here if needed.
+ // We focus on writes/deletes here.
 if (currentSnap.exists) {
 const d = currentSnap.data();
 wasSharded = (d._sharded === true);
 if (wasSharded) shouldWipeShards = true;
 }
- } catch (e) { /* ignore read error */ }
+ } catch (e) {}
 }
 
 // --- COMPRESSION STRATEGY ---
@@ -279,54 +195,32 @@ async function writeSingleResult(result, docRef, name, dateContext, logger, conf
 
 if (rawBuffer.length > 50 * 1024) {
 const compressedBuffer = zlib.gzipSync(rawBuffer);
-
 if (compressedBuffer.length < 900 * 1024) {
- logger.log('INFO', `[Compression] ${name}: Compressed ${(rawBuffer.length/1024).toFixed(0)}KB -> ${(compressedBuffer.length/1024).toFixed(0)}KB. Saved as Blob.`);
+ logger.log('INFO', `[Compression] ${name}: Compressed ${(rawBuffer.length/1024).toFixed(0)}KB -> ${(compressedBuffer.length/1024).toFixed(0)}KB.`);
+ const compressedPayload = { _compressed: true, _completed: true, _lastUpdated: new Date().toISOString(), payload: compressedBuffer };
 
- const compressedPayload = {
- _compressed: true,
- _completed: true,
- _lastUpdated: new Date().toISOString(),
- payload: compressedBuffer
- };
-
- // Cleanup: If it was sharded, or if we are wiping shards on initial write
 if (shouldWipeShards) {
- logger.log('INFO', `[Cleanup] ${name}: Wiping old shards before Compressed Write.`);
 const updates = [];
 const shardCol = docRef.collection('_shards');
 const shardDocs = await shardCol.listDocuments();
 shardDocs.forEach(d => updates.push({ type: 'DELETE', ref: d }));
-
- // Root update with merge: false (overwrites everything)
 updates.push({ ref: docRef, data: compressedPayload, options: { merge: false } });
 
+ opCounts.deletes += shardDocs.length;
+ opCounts.writes += 1;
+
 await commitBatchInChunks(config, deps, updates, `${name}::Cleanup+Compress`);
 } else {
- // Standard update (respecting calculated rootMergeOption)
 await docRef.set(compressedPayload, { merge: rootMergeOption });
+ opCounts.writes += 1;
 }
 
- return {
- totalSize: compressedBuffer.length,
- isSharded: false,
- shardCount: 1,
- nextShardIndex: startShardIndex
- };
+ return { totalSize: compressedBuffer.length, isSharded: false, shardCount: 1, nextShardIndex: startShardIndex, opCounts };
 }
 }
- } catch (compErr) {
- logger.log('WARN', `[Compression] Failed to compress ${name}. Falling back to standard sharding.`, compErr);
- }
- // --- END COMPRESSION STRATEGY ---
-
- const strategies = [
- { bytes: 900 * 1024, keys: null },
- { bytes: 450 * 1024, keys: 10000 },
- { bytes: 200 * 1024, keys: 2000 },
- { bytes: 100 * 1024, keys: 50 }
- ];
+ } catch (compErr) {}
 
+ const strategies = [ { bytes: 900 * 1024, keys: null }, { bytes: 450 * 1024, keys: 10000 }, { bytes: 200 * 1024, keys: 2000 }, { bytes: 100 * 1024, keys: 50 } ];
 let committed = false; let lastError = null;
 let finalStats = { totalSize: 0, isSharded: false, shardCount: 1, nextShardIndex: startShardIndex };
 
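The thresholds above encode a three-way decision: payloads of 50 KB or less skip compression and fall through to the standard write path, larger ones are gzipped, and the blob is stored inline only if it stays under 900 KB — headroom below Firestore's ~1 MiB document limit for the wrapper fields. Anything bigger falls through to the sharding strategies. Distilled:

// Decision logic distilled from the branch above (sizes in bytes).
function chooseStorageMode(rawBuffer) {
  if (rawBuffer.length <= 50 * 1024) return 'STANDARD';    // small: compression not worth it
  const compressed = zlib.gzipSync(rawBuffer);
  if (compressed.length < 900 * 1024) return 'COMPRESSED'; // fits in one doc with headroom
  return 'SHARDED';                                        // too big even gzipped
}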
@@ -335,26 +229,27 @@ async function writeSingleResult(result, docRef, name, dateContext, logger, conf
 const constraints = strategies[attempt];
 try {
 const updates = await prepareAutoShardedWrites(result, docRef, logger, constraints.bytes, constraints.keys, startShardIndex, flushMode);
-
- // Inject Cleanup Ops
 if (shouldWipeShards) {
- logger.log('INFO', `[Cleanup] ${name}: Wiping old shards before Write (Initial).`);
 const shardCol = docRef.collection('_shards');
 const shardDocs = await shardCol.listDocuments();
- // Prepend DELETEs
 shardDocs.forEach(d => updates.unshift({ type: 'DELETE', ref: d }));
- shouldWipeShards = false; // Done for this loop
+ shouldWipeShards = false;
 }
-
- // Ensure the root document write respects our merge option
 const rootUpdate = updates.find(u => u.ref.path === docRef.path && u.type !== 'DELETE');
- if (rootUpdate) {
- rootUpdate.options = { merge: rootMergeOption };
- }
+ if (rootUpdate) { rootUpdate.options = { merge: rootMergeOption }; }
 
- const pointer = updates.find(u => u.data && (u.data._completed !== undefined || u.data._sharded !== undefined));
+ // Calculate Ops
+ const writes = updates.filter(u => u.type !== 'DELETE').length;
+ const deletes = updates.filter(u => u.type === 'DELETE').length;
+
+ await commitBatchInChunks(config, deps, updates, `${name}::${dateContext}`);
+
+ opCounts.writes += writes;
+ opCounts.deletes += deletes;
+
 finalStats.totalSize = updates.reduce((acc, u) => acc + (u.data ? JSON.stringify(u.data).length : 0), 0);
 
+ const pointer = updates.find(u => u.data && (u.data._completed !== undefined || u.data._sharded !== undefined));
 let maxIndex = startShardIndex;
 updates.forEach(u => {
 if (u.type === 'DELETE') return;
@@ -374,28 +269,26 @@ async function writeSingleResult(result, docRef, name, dateContext, logger, conf
 finalStats.nextShardIndex = maxIndex + 1;
 finalStats.isSharded = true;
 }
-
- await commitBatchInChunks(config, deps, updates, `${name}::${dateContext} (Att ${attempt+1})`);
- if (logger && logger.logStorage) { logger.logStorage(null, name, dateContext, docRef.path, finalStats.totalSize, finalStats.isSharded); }
+
 committed = true;
 } catch (commitErr) {
- lastError = commitErr;
- const msg = commitErr.message || '';
- const code = commitErr.code || '';
- const isIndexError = msg.includes('too many index entries') || msg.includes('INVALID_ARGUMENT');
- const isSizeError = msg.includes('Transaction too big') || msg.includes('payload is too large');
-
- if (NON_RETRYABLE_ERRORS.includes(code)) {
- logger.log('ERROR', `[SelfHealing] ${name} FATAL error: ${msg}.`);
- throw commitErr;
- }
- if (isIndexError || isSizeError) {
- logger.log('WARN', `[SelfHealing] ${name} on ${dateContext} failed attempt ${attempt+1}/${strategies.length}. Strategy: ${JSON.stringify(constraints)}. Error: ${msg}. Retrying with stricter limits...`);
- continue;
- } else {
- logger.log('WARN', `[SelfHealing] ${name} on ${dateContext} unknown error. Retrying...`, { error: msg });
- continue;
- }
+ lastError = commitErr;
+ const msg = commitErr.message || '';
+ const code = commitErr.code || '';
+ const isIndexError = msg.includes('too many index entries') || msg.includes('INVALID_ARGUMENT');
+ const isSizeError = msg.includes('Transaction too big') || msg.includes('payload is too large');
+
+ if (NON_RETRYABLE_ERRORS.includes(code)) {
+ logger.log('ERROR', `[SelfHealing] ${name} FATAL error: ${msg}.`);
+ throw commitErr;
+ }
+ if (isIndexError || isSizeError) {
+ logger.log('WARN', `[SelfHealing] ${name} on ${dateContext} failed attempt ${attempt+1}/${strategies.length}. Strategy: ${JSON.stringify(constraints)}. Error: ${msg}. Retrying with stricter limits...`);
+ continue;
+ } else {
+ logger.log('WARN', `[SelfHealing] ${name} on ${dateContext} unknown error. Retrying...`, { error: msg });
+ continue;
+ }
 }
 }
 if (!committed) {
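The catch block pairs the escalating strategies table with a simple triage: non-retryable codes abort immediately, while index/size errors — and, conservatively, unknown ones — fall through to the next, stricter sharding constraint. The control flow in miniature:

// Skeleton of the self-healing loop: try each strategy until one commits.
async function commitWithFallback(strategies, attemptFn) {
  let lastError = null;
  for (let attempt = 0; attempt < strategies.length; attempt++) {
    try {
      return await attemptFn(strategies[attempt]);
    } catch (err) {
      if (NON_RETRYABLE_ERRORS.includes(err.code || '')) throw err; // fatal: stop now
      lastError = err; // retryable or unknown: tighten limits and go again
    }
  }
  throw lastError; // every strategy failed
}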
@@ -404,6 +297,7 @@ async function writeSingleResult(result, docRef, name, dateContext, logger, conf
 if (lastError && lastError.stack) { shardingError.stack = lastError.stack; }
 throw shardingError;
 }
+ finalStats.opCounts = opCounts;
 return finalStats;
 }
 
@@ -1,7 +1,6 @@
 /**
 * @fileoverview Utility for recording computation run attempts (The Audit Logger).
- * UPDATED: Stores 'trigger' reason and 'execution' stats.
- * UPDATED (IDEA 2): Stores granular timing profiles.
+ * UPDATED: Stores 'trigger', 'execution' stats, 'cost' metrics, and 'forensics'.
 */
 
 const { FieldValue } = require('../utils/utils');
@@ -17,9 +16,8 @@ function sanitizeErrorKey(message) {
 /**
 * Records a run attempt with detailed metrics and aggregated stats.
- * ADDED: 'triggerReason' param.
 */
- async function recordRunAttempt(db, context, status, error = null, detailedMetrics = { durationMs: 0 }, triggerReason = 'Unknown') {
+ async function recordRunAttempt(db, context, status, error = null, detailedMetrics = { durationMs: 0 }, triggerReason = 'Unknown', resourceTier = 'standard') {
 if (!db || !context) return;
 
 const { date: targetDate, computation, pass } = context;
@@ -38,7 +36,6 @@ async function recordRunAttempt(db, context, status, error = null, detailedMetri
 const anomalies = detailedMetrics.validation?.anomalies || [];
 if (error && error.message && error.message.includes('Data Integrity')) { anomalies.push(error.message); }
 
- // [IDEA 2] Prepare Execution Stats & Timings
 const rawExecStats = detailedMetrics.execution || {};
 const timings = rawExecStats.timings || {};
 
@@ -52,17 +49,28 @@ async function recordRunAttempt(db, context, status, error = null, detailedMetri
 durationMs: detailedMetrics.durationMs || 0,
 status: status,
 
- // [NEW] Trigger Context
+ // [NEW] Cost & Resource Analysis
+ resourceTier: resourceTier, // 'standard' or 'high-mem'
+ peakMemoryMB: detailedMetrics.peakMemoryMB || 0,
+
+ // [NEW] IO Operations (for Cost Calc)
+ firestoreOps: {
+ reads: detailedMetrics.io?.reads || 0,
+ writes: detailedMetrics.io?.writes || 0,
+ deletes: detailedMetrics.io?.deletes || 0
+ },
+
+ // [NEW] Code Linkage (Forensics)
+ composition: detailedMetrics.composition || null,
+
 trigger: {
 reason: triggerReason || 'Unknown',
 type: (triggerReason && triggerReason.includes('Layer')) ? 'CASCADE' : ((triggerReason && triggerReason.includes('New')) ? 'INIT' : 'UPDATE')
 },
 
- // [IDEA 2] Enhanced Execution Stats
 executionStats: {
 processedUsers: rawExecStats.processedUsers || 0,
 skippedUsers: rawExecStats.skippedUsers || 0,
- // Explicitly break out timings for BigQuery/Analysis
 timings: {
 setupMs: Math.round(timings.setup || 0),
 streamMs: Math.round(timings.stream || 0),
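With firestoreOps recorded per run, a dashboard can turn op counts into an approximate dollar figure. A sketch with placeholder unit prices — substitute the current Firestore rates for your region before relying on the numbers:

// Illustrative per-100k-ops rates (USD); placeholders, not quoted pricing.
const RATES_PER_100K = { reads: 0.06, writes: 0.18, deletes: 0.02 };

function estimateRunCostUSD(firestoreOps) {
  return ['reads', 'writes', 'deletes'].reduce((usd, op) =>
    usd + ((firestoreOps[op] || 0) / 100000) * RATES_PER_100K[op], 0);
}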
@@ -78,7 +86,7 @@ async function recordRunAttempt(db, context, status, error = null, detailedMetri
 },
 },
 anomalies: anomalies,
- _schemaVersion: '2.2' // Bumped for profiler
+ _schemaVersion: '2.3' // Version Bump for Monitoring
 };
 
 if (error) {
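For reference, a hypothetical call site wiring the committer's runMetrics into the new signature — field values are illustrative, not from the package:

await recordRunAttempt(
  db,
  { date: '2024-06-01', computation: 'myCalc', pass: 'pass1' }, // context
  'SUCCESS',
  null, // no error
  {
    durationMs: 1234,
    io: { reads: 2, writes: 14, deletes: 3 }, // becomes firestoreOps
    execution: { processedUsers: 5000 },
    peakMemoryMB: 512
  },
  'Layer 2 dependency changed', // contains 'Layer' -> trigger.type CASCADE
  'high-mem'                    // the new resourceTier parameter
);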
@@ -90,10 +98,12 @@ async function recordRunAttempt(db, context, status, error = null, detailedMetri
 };
 }
 
+ // Aggregated Stats for Quick Dashboarding
 const statsUpdate = {
 lastRunAt: now,
 lastRunStatus: status,
- totalRuns: FieldValue.increment(1)
+ totalRuns: FieldValue.increment(1),
+ totalCostAccumulated: FieldValue.increment(0) // Placeholder for future cost adder
 };
 
 if (status === 'SUCCESS') { statsUpdate.successCount = FieldValue.increment(1);