@aztec/aztec-node 0.0.0-test.1 → 0.0.1-commit.b655e406

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/dest/aztec-node/config.d.ts +14 -9
  2. package/dest/aztec-node/config.d.ts.map +1 -1
  3. package/dest/aztec-node/config.js +75 -14
  4. package/dest/aztec-node/node_metrics.d.ts +4 -0
  5. package/dest/aztec-node/node_metrics.d.ts.map +1 -1
  6. package/dest/aztec-node/node_metrics.js +21 -0
  7. package/dest/aztec-node/server.d.ts +90 -50
  8. package/dest/aztec-node/server.d.ts.map +1 -1
  9. package/dest/aztec-node/server.js +526 -218
  10. package/dest/bin/index.js +4 -2
  11. package/dest/index.d.ts +0 -1
  12. package/dest/index.d.ts.map +1 -1
  13. package/dest/index.js +0 -1
  14. package/dest/sentinel/config.d.ts +8 -0
  15. package/dest/sentinel/config.d.ts.map +1 -0
  16. package/dest/sentinel/config.js +29 -0
  17. package/dest/sentinel/factory.d.ts +9 -0
  18. package/dest/sentinel/factory.d.ts.map +1 -0
  19. package/dest/sentinel/factory.js +17 -0
  20. package/dest/sentinel/index.d.ts +3 -0
  21. package/dest/sentinel/index.d.ts.map +1 -0
  22. package/dest/sentinel/index.js +1 -0
  23. package/dest/sentinel/sentinel.d.ts +91 -0
  24. package/dest/sentinel/sentinel.d.ts.map +1 -0
  25. package/dest/sentinel/sentinel.js +391 -0
  26. package/dest/sentinel/store.d.ts +34 -0
  27. package/dest/sentinel/store.d.ts.map +1 -0
  28. package/dest/sentinel/store.js +169 -0
  29. package/dest/test/index.d.ts +31 -0
  30. package/dest/test/index.d.ts.map +1 -0
  31. package/dest/test/index.js +1 -0
  32. package/package.json +42 -32
  33. package/src/aztec-node/config.ts +128 -25
  34. package/src/aztec-node/node_metrics.ts +28 -0
  35. package/src/aztec-node/server.ts +684 -278
  36. package/src/bin/index.ts +4 -2
  37. package/src/index.ts +0 -1
  38. package/src/sentinel/config.ts +37 -0
  39. package/src/sentinel/factory.ts +36 -0
  40. package/src/sentinel/index.ts +8 -0
  41. package/src/sentinel/sentinel.ts +489 -0
  42. package/src/sentinel/store.ts +184 -0
  43. package/src/test/index.ts +32 -0
  44. package/dest/aztec-node/http_rpc_server.d.ts +0 -8
  45. package/dest/aztec-node/http_rpc_server.d.ts.map +0 -1
  46. package/dest/aztec-node/http_rpc_server.js +0 -9
  47. package/src/aztec-node/http_rpc_server.ts +0 -11
package/dest/bin/index.js CHANGED
@@ -1,7 +1,9 @@
1
1
  #!/usr/bin/env -S node --no-warnings
2
2
  import { createLogger } from '@aztec/foundation/log';
3
+ import { AztecNodeApiSchema } from '@aztec/stdlib/interfaces/client';
4
+ import { createTracedJsonRpcServer } from '@aztec/telemetry-client';
3
5
  import http from 'http';
4
- import { AztecNodeService, createAztecNodeRpcServer, getConfigEnvVars } from '../index.js';
6
+ import { AztecNodeService, getConfigEnvVars } from '../index.js';
5
7
  const { AZTEC_NODE_PORT = 8081, API_PREFIX = '' } = process.env;
6
8
  const logger = createLogger('node');
7
9
  /**
@@ -26,7 +28,7 @@ const logger = createLogger('node');
26
28
  process.once('SIGINT', shutdown);
27
29
  // eslint-disable-next-line @typescript-eslint/no-misused-promises
28
30
  process.once('SIGTERM', shutdown);
29
- const rpcServer = createAztecNodeRpcServer(aztecNode);
31
+ const rpcServer = createTracedJsonRpcServer(aztecNode, AztecNodeApiSchema);
30
32
  const app = rpcServer.getApp(API_PREFIX);
31
33
  // eslint-disable-next-line @typescript-eslint/no-misused-promises
32
34
  const httpServer = http.createServer(app.callback());
package/dest/index.d.ts CHANGED
@@ -1,4 +1,3 @@
1
1
  export * from './aztec-node/config.js';
2
2
  export * from './aztec-node/server.js';
3
- export * from './aztec-node/http_rpc_server.js';
4
3
  //# sourceMappingURL=index.d.ts.map
@@ -1 +1 @@
1
- {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,cAAc,wBAAwB,CAAC;AACvC,cAAc,wBAAwB,CAAC;AACvC,cAAc,iCAAiC,CAAC"}
1
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,cAAc,wBAAwB,CAAC;AACvC,cAAc,wBAAwB,CAAC"}
package/dest/index.js CHANGED
@@ -1,3 +1,2 @@
1
1
  export * from './aztec-node/config.js';
2
2
  export * from './aztec-node/server.js';
3
- export * from './aztec-node/http_rpc_server.js';
@@ -0,0 +1,8 @@
1
+ import { type ConfigMappingsType } from '@aztec/foundation/config';
2
+ export type SentinelConfig = {
3
+ sentinelHistoryLengthInEpochs: number;
4
+ sentinelHistoricProvenPerformanceLengthInEpochs: number;
5
+ sentinelEnabled: boolean;
6
+ };
7
+ export declare const sentinelConfigMappings: ConfigMappingsType<SentinelConfig>;
8
+ //# sourceMappingURL=config.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"config.d.ts","sourceRoot":"","sources":["../../src/sentinel/config.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,KAAK,kBAAkB,EAA2C,MAAM,0BAA0B,CAAC;AAE5G,MAAM,MAAM,cAAc,GAAG;IAC3B,6BAA6B,EAAE,MAAM,CAAC;IACtC,+CAA+C,EAAE,MAAM,CAAC;IACxD,eAAe,EAAE,OAAO,CAAC;CAC1B,CAAC;AAEF,eAAO,MAAM,sBAAsB,EAAE,kBAAkB,CAAC,cAAc,CA4BrE,CAAC"}
@@ -0,0 +1,29 @@
1
+ import { booleanConfigHelper, numberConfigHelper } from '@aztec/foundation/config';
2
+ export const sentinelConfigMappings = {
3
+ sentinelHistoryLengthInEpochs: {
4
+ description: 'The number of L2 epochs kept of history for each validator for computing their stats.',
5
+ env: 'SENTINEL_HISTORY_LENGTH_IN_EPOCHS',
6
+ ...numberConfigHelper(24)
7
+ },
8
+ /**
9
+ * The number of L2 epochs kept of proven performance history for each validator.
10
+ * This value must be large enough so that we have proven performance for every validator
11
+ * for at least slashInactivityConsecutiveEpochThreshold. Assuming this value is 3,
12
+ * and the committee size is 48, and we have 10k validators, then we pick 48 out of 10k each draw.
13
+ * For any fixed element, per-draw prob = 48/10000 = 0.0048.
14
+ * After n draws, count ~ Binomial(n, 0.0048). We want P(X >= 3).
15
+ * Results (exact binomial):
16
+ * - 90% chance: n = 1108
17
+ * - 95% chance: n = 1310
18
+ * - 99% chance: n = 1749
19
+ */ sentinelHistoricProvenPerformanceLengthInEpochs: {
20
+ description: 'The number of L2 epochs kept of proven performance history for each validator.',
21
+ env: 'SENTINEL_HISTORIC_PROVEN_PERFORMANCE_LENGTH_IN_EPOCHS',
22
+ ...numberConfigHelper(2000)
23
+ },
24
+ sentinelEnabled: {
25
+ description: 'Whether the sentinel is enabled or not.',
26
+ env: 'SENTINEL_ENABLED',
27
+ ...booleanConfigHelper(false)
28
+ }
29
+ };
@@ -0,0 +1,9 @@
1
+ import type { EpochCache } from '@aztec/epoch-cache';
2
+ import type { DataStoreConfig } from '@aztec/kv-store/config';
3
+ import type { P2PClient } from '@aztec/p2p';
4
+ import type { L2BlockSource } from '@aztec/stdlib/block';
5
+ import type { SlasherConfig } from '@aztec/stdlib/interfaces/server';
6
+ import type { SentinelConfig } from './config.js';
7
+ import { Sentinel } from './sentinel.js';
8
+ export declare function createSentinel(epochCache: EpochCache, archiver: L2BlockSource, p2p: P2PClient, config: SentinelConfig & DataStoreConfig & SlasherConfig, logger?: import("@aztec/foundation/log").Logger): Promise<Sentinel | undefined>;
9
+ //# sourceMappingURL=factory.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"factory.d.ts","sourceRoot":"","sources":["../../src/sentinel/factory.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,oBAAoB,CAAC;AAErD,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,wBAAwB,CAAC;AAE9D,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,YAAY,CAAC;AAC5C,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,qBAAqB,CAAC;AACzD,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,iCAAiC,CAAC;AAErE,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,aAAa,CAAC;AAClD,OAAO,EAAE,QAAQ,EAAE,MAAM,eAAe,CAAC;AAGzC,wBAAsB,cAAc,CAClC,UAAU,EAAE,UAAU,EACtB,QAAQ,EAAE,aAAa,EACvB,GAAG,EAAE,SAAS,EACd,MAAM,EAAE,cAAc,GAAG,eAAe,GAAG,aAAa,EACxD,MAAM,yCAAgC,GACrC,OAAO,CAAC,QAAQ,GAAG,SAAS,CAAC,CAiB/B"}
@@ -0,0 +1,17 @@
1
+ import { createLogger } from '@aztec/foundation/log';
2
+ import { createStore } from '@aztec/kv-store/lmdb-v2';
3
+ import { Sentinel } from './sentinel.js';
4
+ import { SentinelStore } from './store.js';
5
+ export async function createSentinel(epochCache, archiver, p2p, config, logger = createLogger('node:sentinel')) {
6
+ if (!config.sentinelEnabled) {
7
+ return undefined;
8
+ }
9
+ const kvStore = await createStore('sentinel', SentinelStore.SCHEMA_VERSION, config, createLogger('node:sentinel:lmdb'));
10
+ const storeHistoryLength = config.sentinelHistoryLengthInEpochs * epochCache.getL1Constants().epochDuration;
11
+ const storeHistoricProvenPerformanceLength = config.sentinelHistoricProvenPerformanceLengthInEpochs;
12
+ const sentinelStore = new SentinelStore(kvStore, {
13
+ historyLength: storeHistoryLength,
14
+ historicProvenPerformanceLength: storeHistoricProvenPerformanceLength
15
+ });
16
+ return new Sentinel(epochCache, archiver, p2p, sentinelStore, config, logger);
17
+ }
@@ -0,0 +1,3 @@
1
+ export { Sentinel } from './sentinel.js';
2
+ export type { ValidatorsStats, ValidatorStats, ValidatorStatusHistory, ValidatorStatusInSlot, } from '@aztec/stdlib/validators';
3
+ //# sourceMappingURL=index.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/sentinel/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,QAAQ,EAAE,MAAM,eAAe,CAAC;AAEzC,YAAY,EACV,eAAe,EACf,cAAc,EACd,sBAAsB,EACtB,qBAAqB,GACtB,MAAM,0BAA0B,CAAC"}
@@ -0,0 +1 @@
1
+ export { Sentinel } from './sentinel.js';
@@ -0,0 +1,91 @@
1
+ import type { EpochCache } from '@aztec/epoch-cache';
2
+ import { EthAddress } from '@aztec/foundation/eth-address';
3
+ import { RunningPromise } from '@aztec/foundation/running-promise';
4
+ import { type L2TipsStore } from '@aztec/kv-store/stores';
5
+ import type { P2PClient } from '@aztec/p2p';
6
+ import { type Watcher, type WatcherEmitter } from '@aztec/slasher';
7
+ import type { SlasherConfig } from '@aztec/slasher/config';
8
+ import { type L2BlockSource, L2BlockStream, type L2BlockStreamEvent, type L2BlockStreamEventHandler } from '@aztec/stdlib/block';
9
+ import type { SingleValidatorStats, ValidatorStats, ValidatorStatusHistory, ValidatorStatusInSlot, ValidatorStatusType, ValidatorsEpochPerformance, ValidatorsStats } from '@aztec/stdlib/validators';
10
+ import { SentinelStore } from './store.js';
11
+ declare const Sentinel_base: new () => WatcherEmitter;
12
+ export declare class Sentinel extends Sentinel_base implements L2BlockStreamEventHandler, Watcher {
13
+ protected epochCache: EpochCache;
14
+ protected archiver: L2BlockSource;
15
+ protected p2p: P2PClient;
16
+ protected store: SentinelStore;
17
+ protected config: Pick<SlasherConfig, 'slashInactivityTargetPercentage' | 'slashInactivityPenalty' | 'slashInactivityConsecutiveEpochThreshold'>;
18
+ protected logger: import("@aztec/foundation/log").Logger;
19
+ protected runningPromise: RunningPromise;
20
+ protected blockStream: L2BlockStream;
21
+ protected l2TipsStore: L2TipsStore;
22
+ protected initialSlot: bigint | undefined;
23
+ protected lastProcessedSlot: bigint | undefined;
24
+ protected slotNumberToBlock: Map<bigint, {
25
+ blockNumber: number;
26
+ archive: string;
27
+ attestors: EthAddress[];
28
+ }>;
29
+ constructor(epochCache: EpochCache, archiver: L2BlockSource, p2p: P2PClient, store: SentinelStore, config: Pick<SlasherConfig, 'slashInactivityTargetPercentage' | 'slashInactivityPenalty' | 'slashInactivityConsecutiveEpochThreshold'>, logger?: import("@aztec/foundation/log").Logger);
30
+ updateConfig(config: Partial<SlasherConfig>): void;
31
+ start(): Promise<void>;
32
+ /** Loads initial slot and initializes blockstream. We will not process anything at or before the initial slot. */
33
+ protected init(): Promise<void>;
34
+ stop(): Promise<void>;
35
+ handleBlockStreamEvent(event: L2BlockStreamEvent): Promise<void>;
36
+ protected handleChainProven(event: L2BlockStreamEvent): Promise<void>;
37
+ protected computeProvenPerformance(epoch: bigint): Promise<ValidatorsEpochPerformance>;
38
+ /**
39
+ * Checks if a validator has been inactive for the specified number of consecutive epochs for which we have data on it.
40
+ * @param validator The validator address to check
41
+ * @param currentEpoch Epochs strictly before the current one are evaluated only
42
+ * @param requiredConsecutiveEpochs Number of consecutive epochs required for slashing
43
+ */
44
+ protected checkPastInactivity(validator: EthAddress, currentEpoch: bigint, requiredConsecutiveEpochs: number): Promise<boolean>;
45
+ protected handleProvenPerformance(epoch: bigint, performance: ValidatorsEpochPerformance): Promise<void>;
46
+ /**
47
+ * Process data for two L2 slots ago.
48
+ * Note that we do not process historical data, since we rely on p2p data for processing,
49
+ * and we don't have that data if we were offline during the period.
50
+ */
51
+ work(): Promise<void>;
52
+ /**
53
+ * Check if we are ready to process data for two L2 slots ago, so we allow plenty of time for p2p to process all in-flight attestations.
54
+ * We also don't move past the archiver last synced L2 slot, as we don't want to process data that is not yet available.
55
+ * Last, we check the p2p is synced with the archiver, so it has pulled all attestations from it.
56
+ */
57
+ protected isReadyToProcess(currentSlot: bigint): Promise<bigint | false>;
58
+ /**
59
+ * Gathers committee and proposer data for a given slot, computes slot stats,
60
+ * and updates overall stats.
61
+ */
62
+ protected processSlot(slot: bigint): Promise<void>;
63
+ /** Computes activity for a given slot. */
64
+ protected getSlotActivity(slot: bigint, epoch: bigint, proposer: EthAddress, committee: EthAddress[]): Promise<{
65
+ [k: string]: ValidatorStatusInSlot | undefined;
66
+ }>;
67
+ /** Push the status for each slot for each validator. */
68
+ protected updateValidators(slot: bigint, stats: Record<`0x${string}`, ValidatorStatusInSlot | undefined>): Promise<void>;
69
+ /** Computes stats to be returned based on stored data. */
70
+ computeStats({ fromSlot, toSlot, validators, }?: {
71
+ fromSlot?: bigint;
72
+ toSlot?: bigint;
73
+ validators?: EthAddress[];
74
+ }): Promise<ValidatorsStats>;
75
+ /** Computes stats for a single validator. */
76
+ getValidatorStats(validatorAddress: EthAddress, fromSlot?: bigint, toSlot?: bigint): Promise<SingleValidatorStats | undefined>;
77
+ protected computeStatsForValidator(address: `0x${string}`, allHistory: ValidatorStatusHistory, fromSlot?: bigint, toSlot?: bigint): ValidatorStats;
78
+ protected computeMissed(history: ValidatorStatusHistory, computeOverPrefix: ValidatorStatusType | undefined, filter: ValidatorStatusInSlot[]): {
79
+ currentStreak: number;
80
+ rate: number | undefined;
81
+ count: number;
82
+ total: number;
83
+ };
84
+ protected computeFromSlot(slot: bigint | undefined): {
85
+ timestamp: bigint;
86
+ slot: bigint;
87
+ date: string;
88
+ } | undefined;
89
+ }
90
+ export {};
91
+ //# sourceMappingURL=sentinel.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"sentinel.d.ts","sourceRoot":"","sources":["../../src/sentinel/sentinel.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,oBAAoB,CAAC;AAErD,OAAO,EAAE,UAAU,EAAE,MAAM,+BAA+B,CAAC;AAE3D,OAAO,EAAE,cAAc,EAAE,MAAM,mCAAmC,CAAC;AACnE,OAAO,EAAqB,KAAK,WAAW,EAAE,MAAM,wBAAwB,CAAC;AAC7E,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,YAAY,CAAC;AAC5C,OAAO,EAIL,KAAK,OAAO,EACZ,KAAK,cAAc,EACpB,MAAM,gBAAgB,CAAC;AACxB,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,uBAAuB,CAAC;AAC3D,OAAO,EACL,KAAK,aAAa,EAClB,aAAa,EACb,KAAK,kBAAkB,EACvB,KAAK,yBAAyB,EAE/B,MAAM,qBAAqB,CAAC;AAE7B,OAAO,KAAK,EACV,oBAAoB,EACpB,cAAc,EACd,sBAAsB,EACtB,qBAAqB,EACrB,mBAAmB,EACnB,0BAA0B,EAC1B,eAAe,EAChB,MAAM,0BAA0B,CAAC;AAIlC,OAAO,EAAE,aAAa,EAAE,MAAM,YAAY,CAAC;6BAEI,UAAU,cAAc;AAAvE,qBAAa,QAAS,SAAQ,aAA2C,YAAW,yBAAyB,EAAE,OAAO;IAWlH,SAAS,CAAC,UAAU,EAAE,UAAU;IAChC,SAAS,CAAC,QAAQ,EAAE,aAAa;IACjC,SAAS,CAAC,GAAG,EAAE,SAAS;IACxB,SAAS,CAAC,KAAK,EAAE,aAAa;IAC9B,SAAS,CAAC,MAAM,EAAE,IAAI,CACpB,aAAa,EACb,iCAAiC,GAAG,wBAAwB,GAAG,0CAA0C,CAC1G;IACD,SAAS,CAAC,MAAM;IAlBlB,SAAS,CAAC,cAAc,EAAE,cAAc,CAAC;IACzC,SAAS,CAAC,WAAW,EAAG,aAAa,CAAC;IACtC,SAAS,CAAC,WAAW,EAAE,WAAW,CAAC;IAEnC,SAAS,CAAC,WAAW,EAAE,MAAM,GAAG,SAAS,CAAC;IAC1C,SAAS,CAAC,iBAAiB,EAAE,MAAM,GAAG,SAAS,CAAC;IAChD,SAAS,CAAC,iBAAiB,EAAE,GAAG,CAAC,MAAM,EAAE;QAAE,WAAW,EAAE,MAAM,CAAC;QAAC,OAAO,EAAE,MAAM,CAAC;QAAC,SAAS,EAAE,UAAU,EAAE,CAAA;KAAE,CAAC,CAC/F;gBAGA,UAAU,EAAE,UAAU,EACtB,QAAQ,EAAE,aAAa,EACvB,GAAG,EAAE,SAAS,EACd,KAAK,EAAE,aAAa,EACpB,MAAM,EAAE,IAAI,CACpB,aAAa,EACb,iCAAiC,GAAG,wBAAwB,GAAG,0CAA0C,CAC1G,EACS,MAAM,yCAAgC;IAQ3C,YAAY,CAAC,MAAM,EAAE,OAAO,CAAC,aAAa,CAAC;IAIrC,KAAK;IAKlB,kHAAkH;cAClG,IAAI;IAOb,IAAI;IAIE,sBAAsB,CAAC,KAAK,EAAE,kBAAkB,GAAG,OAAO,CAAC,IAAI,CAAC;cA6B7D,iBAAiB,CAAC,KAAK,EAAE,kBAAkB;cAsB3C,wBAAwB,CAAC,KAAK,EAAE,MAAM,GAAG,OAAO,CAAC,0BAA0B,CAAC;IAuB5F;;;;;OAKG;cACa,mBAAmB,CACjC,SAAS,EAAE,UAAU,EACrB,YAAY,EAAE,MAAM,EACpB,yBAAyB,EAAE,MAAM,GAChC,OAAO,CAAC,OAAO,CAAC;cAwBH,uBAAuB,CAAC,KAAK,EAAE,MAAM,EAAE,WAAW,EAAE,0BAA0B;IAoC9F;;;;OAIG;IACU,IAAI;IAmBjB;;;;OAIG;cA
Ca,gBAAgB,CAAC,WAAW,EAAE,MAAM;IAkCpD;;;OAGG;cACa,WAAW,CAAC,IAAI,EAAE,MAAM;IAexC,0CAA0C;cAC1B,eAAe,CAAC,IAAI,EAAE,MAAM,EAAE,KAAK,EAAE,MAAM,EAAE,QAAQ,EAAE,UAAU,EAAE,SAAS,EAAE,UAAU,EAAE;;;IA6D1G,wDAAwD;IACxD,SAAS,CAAC,gBAAgB,CAAC,IAAI,EAAE,MAAM,EAAE,KAAK,EAAE,MAAM,CAAC,KAAK,MAAM,EAAE,EAAE,qBAAqB,GAAG,SAAS,CAAC;IAIxG,0DAA0D;IAC7C,YAAY,CAAC,EACxB,QAAQ,EACR,MAAM,EACN,UAAU,GACX,GAAE;QAAE,QAAQ,CAAC,EAAE,MAAM,CAAC;QAAC,MAAM,CAAC,EAAE,MAAM,CAAC;QAAC,UAAU,CAAC,EAAE,UAAU,EAAE,CAAA;KAAO,GAAG,OAAO,CAAC,eAAe,CAAC;IAqBpG,6CAA6C;IAChC,iBAAiB,CAC5B,gBAAgB,EAAE,UAAU,EAC5B,QAAQ,CAAC,EAAE,MAAM,EACjB,MAAM,CAAC,EAAE,MAAM,GACd,OAAO,CAAC,oBAAoB,GAAG,SAAS,CAAC;IAoC5C,SAAS,CAAC,wBAAwB,CAChC,OAAO,EAAE,KAAK,MAAM,EAAE,EACtB,UAAU,EAAE,sBAAsB,EAClC,QAAQ,CAAC,EAAE,MAAM,EACjB,MAAM,CAAC,EAAE,MAAM,GACd,cAAc;IAgBjB,SAAS,CAAC,aAAa,CACrB,OAAO,EAAE,sBAAsB,EAC/B,iBAAiB,EAAE,mBAAmB,GAAG,SAAS,EAClD,MAAM,EAAE,qBAAqB,EAAE;;;;;;IAYjC,SAAS,CAAC,eAAe,CAAC,IAAI,EAAE,MAAM,GAAG,SAAS;;;;;CAOnD"}
@@ -0,0 +1,391 @@
1
+ import { countWhile, filterAsync, fromEntries, getEntries, mapValues } from '@aztec/foundation/collection';
2
+ import { EthAddress } from '@aztec/foundation/eth-address';
3
+ import { createLogger } from '@aztec/foundation/log';
4
+ import { RunningPromise } from '@aztec/foundation/running-promise';
5
+ import { L2TipsMemoryStore } from '@aztec/kv-store/stores';
6
+ import { OffenseType, WANT_TO_SLASH_EVENT } from '@aztec/slasher';
7
+ import { L2BlockStream, getAttestationInfoFromPublishedL2Block } from '@aztec/stdlib/block';
8
+ import { getEpochAtSlot, getSlotRangeForEpoch, getTimestampForSlot } from '@aztec/stdlib/epoch-helpers';
9
+ import EventEmitter from 'node:events';
10
+ export class Sentinel extends EventEmitter {
11
+ epochCache;
12
+ archiver;
13
+ p2p;
14
+ store;
15
+ config;
16
+ logger;
17
+ runningPromise;
18
+ blockStream;
19
+ l2TipsStore;
20
+ initialSlot;
21
+ lastProcessedSlot;
22
+ slotNumberToBlock;
23
+ constructor(epochCache, archiver, p2p, store, config, logger = createLogger('node:sentinel')){
24
+ super(), this.epochCache = epochCache, this.archiver = archiver, this.p2p = p2p, this.store = store, this.config = config, this.logger = logger, this.slotNumberToBlock = new Map();
25
+ this.l2TipsStore = new L2TipsMemoryStore();
26
+ const interval = epochCache.getL1Constants().ethereumSlotDuration * 1000 / 4;
27
+ this.runningPromise = new RunningPromise(this.work.bind(this), logger, interval);
28
+ }
29
+ updateConfig(config) {
30
+ this.config = {
31
+ ...this.config,
32
+ ...config
33
+ };
34
+ }
35
+ async start() {
36
+ await this.init();
37
+ this.runningPromise.start();
38
+ }
39
+ /** Loads initial slot and initializes blockstream. We will not process anything at or before the initial slot. */ async init() {
40
+ this.initialSlot = this.epochCache.getEpochAndSlotNow().slot;
41
+ const startingBlock = await this.archiver.getBlockNumber();
42
+ this.logger.info(`Starting validator sentinel with initial slot ${this.initialSlot} and block ${startingBlock}`);
43
+ this.blockStream = new L2BlockStream(this.archiver, this.l2TipsStore, this, this.logger, {
44
+ startingBlock
45
+ });
46
+ }
47
+ stop() {
48
+ return this.runningPromise.stop();
49
+ }
50
+ async handleBlockStreamEvent(event) {
51
+ await this.l2TipsStore.handleBlockStreamEvent(event);
52
+ if (event.type === 'blocks-added') {
53
+ // Store mapping from slot to archive, block number, and attestors
54
+ for (const block of event.blocks){
55
+ this.slotNumberToBlock.set(block.block.header.getSlot(), {
56
+ blockNumber: block.block.number,
57
+ archive: block.block.archive.root.toString(),
58
+ attestors: getAttestationInfoFromPublishedL2Block(block).filter((a)=>a.status === 'recovered-from-signature').map((a)=>a.address)
59
+ });
60
+ }
61
+ // Prune the archive map to only keep at most N entries
62
+ const historyLength = this.store.getHistoryLength();
63
+ if (this.slotNumberToBlock.size > historyLength) {
64
+ const toDelete = Array.from(this.slotNumberToBlock.keys()).sort((a, b)=>Number(a - b)).slice(0, this.slotNumberToBlock.size - historyLength);
65
+ for (const key of toDelete){
66
+ this.slotNumberToBlock.delete(key);
67
+ }
68
+ }
69
+ } else if (event.type === 'chain-proven') {
70
+ await this.handleChainProven(event);
71
+ }
72
+ }
73
+ async handleChainProven(event) {
74
+ if (event.type !== 'chain-proven') {
75
+ return;
76
+ }
77
+ const blockNumber = event.block.number;
78
+ const block = await this.archiver.getBlock(blockNumber);
79
+ if (!block) {
80
+ this.logger.error(`Failed to get block ${blockNumber}`, {
81
+ block
82
+ });
83
+ return;
84
+ }
85
+ // TODO(palla/slash): We should only be computing proven performance if this is
86
+ // a full proof epoch and not a partial one, otherwise we'll end up with skewed stats.
87
+ const epoch = getEpochAtSlot(block.header.getSlot(), this.epochCache.getL1Constants());
88
+ this.logger.debug(`Computing proven performance for epoch ${epoch}`);
89
+ const performance = await this.computeProvenPerformance(epoch);
90
+ this.logger.info(`Computed proven performance for epoch ${epoch}`, performance);
91
+ await this.store.updateProvenPerformance(epoch, performance);
92
+ await this.handleProvenPerformance(epoch, performance);
93
+ }
94
+ async computeProvenPerformance(epoch) {
95
+ const [fromSlot, toSlot] = getSlotRangeForEpoch(epoch, this.epochCache.getL1Constants());
96
+ const { committee } = await this.epochCache.getCommittee(fromSlot);
97
+ if (!committee) {
98
+ this.logger.trace(`No committee found for slot ${fromSlot}`);
99
+ return {};
100
+ }
101
+ const stats = await this.computeStats({
102
+ fromSlot,
103
+ toSlot,
104
+ validators: committee
105
+ });
106
+ this.logger.debug(`Stats for epoch ${epoch}`, {
107
+ ...stats,
108
+ fromSlot,
109
+ toSlot,
110
+ epoch
111
+ });
112
+ // Note that we are NOT using the total slots in the epoch as `total` here, since we only
113
+ // compute missed attestations over the blocks that had a proposal in them. So, let's say
114
+ // we have an epoch with 10 slots, but only 5 had a block proposal. A validator that was
115
+ // offline, assuming they were not picked as proposer, will then be reported as having missed
116
+ // 5/5 attestations. If we used the total, they'd be reported as 5/10, which would probably
117
+ // allow them to avoid being slashed.
118
+ return mapValues(stats.stats, (stat)=>({
119
+ missed: stat.missedAttestations.count + stat.missedProposals.count,
120
+ total: stat.missedAttestations.total + stat.missedProposals.total
121
+ }));
122
+ }
123
+ /**
124
+ * Checks if a validator has been inactive for the specified number of consecutive epochs for which we have data on it.
125
+ * @param validator The validator address to check
126
+ * @param currentEpoch Epochs strictly before the current one are evaluated only
127
+ * @param requiredConsecutiveEpochs Number of consecutive epochs required for slashing
128
+ */ async checkPastInactivity(validator, currentEpoch, requiredConsecutiveEpochs) {
129
+ if (requiredConsecutiveEpochs === 0) {
130
+ return true;
131
+ }
132
+ // Get all historical performance for this validator
133
+ const allPerformance = await this.store.getProvenPerformance(validator);
134
+ // If we don't have enough historical data, don't slash
135
+ if (allPerformance.length < requiredConsecutiveEpochs) {
136
+ this.logger.debug(`Not enough historical data for slashing ${validator} for inactivity (${allPerformance.length} epochs < ${requiredConsecutiveEpochs} required)`);
137
+ return false;
138
+ }
139
+ // Sort by epoch descending to get most recent first, keep only epochs strictly before the current one, and get the first N
140
+ return allPerformance.sort((a, b)=>Number(b.epoch - a.epoch)).filter((p)=>p.epoch < currentEpoch).slice(0, requiredConsecutiveEpochs).every((p)=>p.missed / p.total >= this.config.slashInactivityTargetPercentage);
141
+ }
142
+ async handleProvenPerformance(epoch, performance) {
143
+ if (this.config.slashInactivityPenalty === 0n) {
144
+ return;
145
+ }
146
+ const inactiveValidators = getEntries(performance).filter(([_, { missed, total }])=>missed / total >= this.config.slashInactivityTargetPercentage).map(([address])=>address);
147
+ this.logger.debug(`Found ${inactiveValidators.length} inactive validators in epoch ${epoch}`, {
148
+ inactiveValidators,
149
+ epoch,
150
+ inactivityTargetPercentage: this.config.slashInactivityTargetPercentage
151
+ });
152
+ const epochThreshold = this.config.slashInactivityConsecutiveEpochThreshold;
153
+ const criminals = await filterAsync(inactiveValidators, (address)=>this.checkPastInactivity(EthAddress.fromString(address), epoch, epochThreshold - 1));
154
+ const args = criminals.map((address)=>({
155
+ validator: EthAddress.fromString(address),
156
+ amount: this.config.slashInactivityPenalty,
157
+ offenseType: OffenseType.INACTIVITY,
158
+ epochOrSlot: epoch
159
+ }));
160
+ if (criminals.length > 0) {
161
+ this.logger.verbose(`Identified ${criminals.length} validators to slash due to inactivity in at least ${epochThreshold} consecutive epochs`, {
162
+ ...args,
163
+ epochThreshold
164
+ });
165
+ this.emit(WANT_TO_SLASH_EVENT, args);
166
+ }
167
+ }
168
+ /**
169
+ * Process data for two L2 slots ago.
170
+ * Note that we do not process historical data, since we rely on p2p data for processing,
171
+ * and we don't have that data if we were offline during the period.
172
+ */ async work() {
173
+ const { slot: currentSlot } = this.epochCache.getEpochAndSlotNow();
174
+ try {
175
+ // Manually sync the block stream to ensure we have the latest data.
176
+ // Note we never `start` the blockstream, so it loops at the same pace as we do.
177
+ await this.blockStream.sync();
178
+ // Check if we are ready to process data for two L2 slots ago.
179
+ const targetSlot = await this.isReadyToProcess(currentSlot);
180
+ // And process it if we are.
181
+ if (targetSlot !== false) {
182
+ await this.processSlot(targetSlot);
183
+ }
184
+ } catch (err) {
185
+ this.logger.error(`Failed to process slot ${currentSlot}`, err);
186
+ }
187
+ }
188
+ /**
189
+ * Check if we are ready to process data for two L2 slots ago, so we allow plenty of time for p2p to process all in-flight attestations.
190
+ * We also don't move past the archiver last synced L2 slot, as we don't want to process data that is not yet available.
191
+ * Last, we check the p2p is synced with the archiver, so it has pulled all attestations from it.
192
+ */ async isReadyToProcess(currentSlot) {
193
+ const targetSlot = currentSlot - 2n;
194
+ if (this.lastProcessedSlot && this.lastProcessedSlot >= targetSlot) {
195
+ this.logger.trace(`Already processed slot ${targetSlot}`, {
196
+ lastProcessedSlot: this.lastProcessedSlot
197
+ });
198
+ return false;
199
+ }
200
+ if (this.initialSlot === undefined) {
201
+ this.logger.error(`Initial slot not loaded.`);
202
+ return false;
203
+ }
204
+ if (targetSlot <= this.initialSlot) {
205
+ this.logger.trace(`Refusing to process slot ${targetSlot} given initial slot ${this.initialSlot}`);
206
+ return false;
207
+ }
208
+ const archiverSlot = await this.archiver.getL2SlotNumber();
209
+ if (archiverSlot === undefined || archiverSlot < targetSlot) {
210
+ this.logger.debug(`Waiting for archiver to sync with L2 slot ${targetSlot}`, {
211
+ archiverSlot,
212
+ targetSlot
213
+ });
214
+ return false;
215
+ }
216
+ const archiverLastBlockHash = await this.l2TipsStore.getL2Tips().then((tip)=>tip.latest.hash);
217
+ const p2pLastBlockHash = await this.p2p.getL2Tips().then((tips)=>tips.latest.hash);
218
+ const isP2pSynced = archiverLastBlockHash === p2pLastBlockHash;
219
+ if (!isP2pSynced) {
220
+ this.logger.debug(`Waiting for P2P client to sync with archiver`, {
221
+ archiverLastBlockHash,
222
+ p2pLastBlockHash
223
+ });
224
+ return false;
225
+ }
226
+ return targetSlot;
227
+ }
228
+ /**
229
+ * Gathers committee and proposer data for a given slot, computes slot stats,
230
+ * and updates overall stats.
231
+ */ async processSlot(slot) {
232
+ const { epoch, seed, committee } = await this.epochCache.getCommittee(slot);
233
+ if (!committee || committee.length === 0) {
234
+ this.logger.trace(`No committee found for slot ${slot} at epoch ${epoch}`);
235
+ this.lastProcessedSlot = slot;
236
+ return;
237
+ }
238
+ const proposerIndex = this.epochCache.computeProposerIndex(slot, epoch, seed, BigInt(committee.length));
239
+ const proposer = committee[Number(proposerIndex)];
240
+ const stats = await this.getSlotActivity(slot, epoch, proposer, committee);
241
+ this.logger.verbose(`Updating L2 slot ${slot} observed activity`, stats);
242
+ await this.updateValidators(slot, stats);
243
+ this.lastProcessedSlot = slot;
244
+ }
245
+ /** Computes activity for a given slot. */ async getSlotActivity(slot, epoch, proposer, committee) {
246
+ this.logger.debug(`Computing stats for slot ${slot} at epoch ${epoch}`, {
247
+ slot,
248
+ epoch,
249
+ proposer,
250
+ committee
251
+ });
252
+ // Check if there is an L2 block in L1 for this L2 slot
253
+ // Here we get all attestations for the block mined at the given slot,
254
+ // or all attestations for all proposals in the slot if no block was mined.
255
+ // We gather from both p2p (contains the ones seen on the p2p layer) and archiver
256
+ // (contains the ones synced from mined blocks, which we may have missed from p2p).
257
+ const block = this.slotNumberToBlock.get(slot);
258
+ const p2pAttested = await this.p2p.getAttestationsForSlot(slot, block?.archive);
259
+ // Filter out attestations with invalid signatures
260
+ const p2pAttestors = p2pAttested.map((a)=>a.getSender()).filter((s)=>s !== undefined);
261
+ const attestors = new Set([
262
+ ...p2pAttestors.map((a)=>a.toString()),
263
+ ...block?.attestors.map((a)=>a.toString()) ?? []
264
+ ].filter((addr)=>proposer.toString() !== addr));
265
+ // We assume that there was a block proposal if at least one of the validators (other than the proposer) attested to it.
266
+ // It could be the case that every single validator failed, and we could differentiate it by having
267
+ // this node re-execute every block proposal it sees and storing it in the attestation pool.
268
+ // But we'll leave that corner case out to reduce pressure on the node.
269
+ // TODO(palla/slash): This breaks if a given node has more than one validator in the current committee,
270
+ // since they will attest to their own proposal it even if it's not re-executable.
271
+ const blockStatus = block ? 'mined' : attestors.size > 0 ? 'proposed' : 'missed';
272
+ this.logger.debug(`Block for slot ${slot} was ${blockStatus}`, {
273
+ ...block,
274
+ slot
275
+ });
276
+ // Get attestors that failed their duties for this block, but only if there was a block proposed
277
+ const missedAttestors = new Set(blockStatus === 'missed' ? [] : committee.filter((v)=>!attestors.has(v.toString()) && !proposer.equals(v)).map((v)=>v.toString()));
278
+ this.logger.debug(`Retrieved ${attestors.size} attestors out of ${committee.length} for slot ${slot}`, {
279
+ blockStatus,
280
+ proposer: proposer.toString(),
281
+ ...block,
282
+ slot,
283
+ attestors: [
284
+ ...attestors
285
+ ],
286
+ missedAttestors: [
287
+ ...missedAttestors
288
+ ],
289
+ committee: committee.map((c)=>c.toString())
290
+ });
291
+ // Compute the status for each validator in the committee
292
+ const statusFor = (who)=>{
293
+ if (who === proposer.toString()) {
294
+ return `block-${blockStatus}`;
295
+ } else if (attestors.has(who)) {
296
+ return 'attestation-sent';
297
+ } else if (missedAttestors.has(who)) {
298
+ return 'attestation-missed';
299
+ } else {
300
+ return undefined;
301
+ }
302
+ };
303
+ return Object.fromEntries(committee.map((v)=>v.toString()).map((who)=>[
304
+ who,
305
+ statusFor(who)
306
+ ]));
307
+ }
308
+ /** Push the status for each slot for each validator. */ updateValidators(slot, stats) {
309
+ return this.store.updateValidators(slot, stats);
310
+ }
311
+ /** Computes stats to be returned based on stored data. */ async computeStats({ fromSlot, toSlot, validators } = {}) {
312
+ const histories = validators ? fromEntries(await Promise.all(validators.map(async (v)=>[
313
+ v.toString(),
314
+ await this.store.getHistory(v)
315
+ ]))) : await this.store.getHistories();
316
+ const slotNow = this.epochCache.getEpochAndSlotNow().slot;
317
+ fromSlot ??= (this.lastProcessedSlot ?? slotNow) - BigInt(this.store.getHistoryLength());
318
+ toSlot ??= this.lastProcessedSlot ?? slotNow;
319
+ const stats = mapValues(histories, (history, address)=>this.computeStatsForValidator(address, history ?? [], fromSlot, toSlot));
320
+ return {
321
+ stats,
322
+ lastProcessedSlot: this.lastProcessedSlot,
323
+ initialSlot: this.initialSlot,
324
+ slotWindow: this.store.getHistoryLength()
325
+ };
326
+ }
327
+ /** Computes stats for a single validator. */ async getValidatorStats(validatorAddress, fromSlot, toSlot) {
328
+ const history = await this.store.getHistory(validatorAddress);
329
+ if (!history || history.length === 0) {
330
+ return undefined;
331
+ }
332
+ const slotNow = this.epochCache.getEpochAndSlotNow().slot;
333
+ const effectiveFromSlot = fromSlot ?? (this.lastProcessedSlot ?? slotNow) - BigInt(this.store.getHistoryLength());
334
+ const effectiveToSlot = toSlot ?? this.lastProcessedSlot ?? slotNow;
335
+ const historyLength = BigInt(this.store.getHistoryLength());
336
+ if (effectiveToSlot - effectiveFromSlot > historyLength) {
337
+ throw new Error(`Slot range (${effectiveToSlot - effectiveFromSlot}) exceeds history length (${historyLength}). ` + `Requested range: ${effectiveFromSlot} to ${effectiveToSlot}.`);
338
+ }
339
+ const validator = this.computeStatsForValidator(validatorAddress.toString(), history, effectiveFromSlot, effectiveToSlot);
340
+ const allTimeProvenPerformance = await this.store.getProvenPerformance(validatorAddress);
341
+ return {
342
+ validator,
343
+ allTimeProvenPerformance,
344
+ lastProcessedSlot: this.lastProcessedSlot,
345
+ initialSlot: this.initialSlot,
346
+ slotWindow: this.store.getHistoryLength()
347
+ };
348
+ }
349
+ computeStatsForValidator(address, allHistory, fromSlot, toSlot) {
350
+ let history = fromSlot ? allHistory.filter((h)=>h.slot >= fromSlot) : allHistory;
351
+ history = toSlot ? history.filter((h)=>h.slot <= toSlot) : history;
352
+ const lastProposal = history.filter((h)=>h.status === 'block-proposed' || h.status === 'block-mined').at(-1);
353
+ const lastAttestation = history.filter((h)=>h.status === 'attestation-sent').at(-1);
354
+ return {
355
+ address: EthAddress.fromString(address),
356
+ lastProposal: this.computeFromSlot(lastProposal?.slot),
357
+ lastAttestation: this.computeFromSlot(lastAttestation?.slot),
358
+ totalSlots: history.length,
359
+ missedProposals: this.computeMissed(history, 'block', [
360
+ 'block-missed'
361
+ ]),
362
+ missedAttestations: this.computeMissed(history, 'attestation', [
363
+ 'attestation-missed'
364
+ ]),
365
+ history
366
+ };
367
+ }
368
+ computeMissed(history, computeOverPrefix, filter) {
369
+ const relevantHistory = history.filter((h)=>!computeOverPrefix || h.status.startsWith(computeOverPrefix));
370
+ const filteredHistory = relevantHistory.filter((h)=>filter.includes(h.status));
371
+ return {
372
+ currentStreak: countWhile([
373
+ ...relevantHistory
374
+ ].reverse(), (h)=>filter.includes(h.status)),
375
+ rate: relevantHistory.length === 0 ? undefined : filteredHistory.length / relevantHistory.length,
376
+ count: filteredHistory.length,
377
+ total: relevantHistory.length
378
+ };
379
+ }
380
+ computeFromSlot(slot) {
381
+ if (slot === undefined) {
382
+ return undefined;
383
+ }
384
+ const timestamp = getTimestampForSlot(slot, this.epochCache.getL1Constants());
385
+ return {
386
+ timestamp,
387
+ slot,
388
+ date: new Date(Number(timestamp) * 1000).toISOString()
389
+ };
390
+ }
391
+ }
@@ -0,0 +1,34 @@
1
import { EthAddress } from '@aztec/foundation/eth-address';
import type { AztecAsyncKVStore } from '@aztec/kv-store';
import type { ValidatorStatusHistory, ValidatorStatusInSlot, ValidatorsEpochPerformance } from '@aztec/stdlib/validators';
/**
 * KV-backed store for sentinel data: a bounded per-validator history of per-slot
 * statuses plus a bounded record of per-epoch proven performance.
 */
export declare class SentinelStore {
    private store;
    private config;
    /** On-disk layout version; NOTE(review): presumably bumped when the serialized format changes — confirm against store.js. */
    static readonly SCHEMA_VERSION = 2;
    /** Backing map for per-validator status histories. */
    private readonly historyMap;
    /** Backing map for per-validator proven performance entries. */
    private readonly provenMap;
    constructor(store: AztecAsyncKVStore, config: {
        /** Max number of per-slot status entries retained per validator. */
        historyLength: number;
        /** Max number of per-epoch proven-performance entries retained per validator. */
        historicProvenPerformanceLength: number;
    });
    /** Returns the configured status-history window length (in slots). */
    getHistoryLength(): number;
    /** Returns the configured proven-performance retention length (in epochs). */
    getHistoricProvenPerformanceLength(): number;
    /** Records proven performance for all validators for the given epoch. */
    updateProvenPerformance(epoch: bigint, performance: ValidatorsEpochPerformance): Promise<void>;
    /** Returns the stored per-epoch proven performance entries for a validator. */
    getProvenPerformance(who: EthAddress): Promise<{
        missed: number;
        total: number;
        epoch: bigint;
    }[]>;
    private pushValidatorProvenPerformanceForEpoch;
    /** Records the status of each validator for the given slot; undefined statuses are accepted. */
    updateValidators(slot: bigint, statuses: Record<`0x${string}`, ValidatorStatusInSlot | undefined>): Promise<void>;
    private pushValidatorStatusForSlot;
    /** Returns the status history of every validator, keyed by hex address. */
    getHistories(): Promise<Record<`0x${string}`, ValidatorStatusHistory>>;
    /** Returns the status history for one validator, or undefined if none is stored. */
    getHistory(address: EthAddress): Promise<ValidatorStatusHistory | undefined>;
    private serializePerformance;
    private deserializePerformance;
    private serializeHistory;
    private deserializeHistory;
    private statusToNumber;
    private statusFromNumber;
}
//# sourceMappingURL=store.d.ts.map