@sqd-pipes/delta-db 0.0.1-alpha.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,83 @@
1
+ # @sqd-pipes/delta-db
2
+
3
+ Embedded rollback-aware computation engine for blockchain data. Routes raw events through **reducers** and **materialized views**, producing delta records (insert/update/delete) for downstream targets.
4
+
5
+ ## Build
6
+
7
+ Requires **Rust** (stable) and **Node.js** >= 18.
8
+
9
+ ```bash
10
+ cd packages/delta-db-node
11
+ npm install
12
+
13
+ # Debug build
14
+ npx napi build --cargo-cwd ../.. --features napi
15
+
16
+ # Release build
17
+ npx napi build --cargo-cwd ../.. --features napi --release
18
+ ```
19
+
20
+ This produces `delta-db.node` (native binary) and regenerates `index.d.ts`.
21
+
22
+ If `index.js` is missing after build, create a minimal re-export (note: unlike the published `src/index.js`, this bypasses the msgpack serialization wrapper):
23
+
24
+ ```js
25
+ const { DeltaDb } = require('./delta-db.node')
26
+ module.exports.DeltaDb = DeltaDb
27
+ ```
28
+
29
+ ## Test
30
+
31
+ ```bash
32
+ npm test
33
+ ```
34
+
35
+ ## Usage
36
+
37
+ ### Direct API
38
+
39
+ ```typescript
40
+ import { DeltaDb } from '@sqd-pipes/delta-db'
41
+
42
+ const db = DeltaDb.open({
43
+ schema: `
44
+ CREATE TABLE swaps (
45
+ block_number UInt64,
46
+ pool String,
47
+ amount Float64
48
+ );
49
+ CREATE MATERIALIZED VIEW volume AS
50
+ SELECT pool, sum(amount) AS total, count() AS cnt
51
+ FROM swaps GROUP BY pool;
52
+ `,
53
+ dataDir: './data', // optional, enables persistence
54
+ })
55
+
56
+ const batch = db.ingest({
57
+ data: {
58
+ swaps: [
59
+ { block_number: 1000, pool: 'ETH/USDC', amount: 100 },
60
+ ],
61
+ },
62
+ rollbackChain: [{ number: 1000, hash: '0x...' }],
63
+ finalizedHead: { number: 999, hash: '0x...' },
64
+ })
65
+ // batch.records → [{ table: 'swaps', operation: 'insert', ... }, { table: 'volume', operation: 'insert', ... }]
66
+ ```
67
+
68
+ ### Pipes SDK
69
+
70
+ ```typescript
71
+ import { deltaDbTarget } from '@sqd-pipes/delta-db/pipes'
72
+
73
+ await source
74
+ .pipe(decoder)
75
+ .pipeTo(deltaDbTarget({
76
+ schema: '...',
77
+ dataDir: './data',
78
+ transform: (data) => ({ transfers: data.transfers.map(formatRow) }),
79
+ onDelta: async ({ batch }) => {
80
+ await clickhouse.insert(batch.records)
81
+ },
82
+ }))
83
+ ```
package/package.json ADDED
@@ -0,0 +1,33 @@
1
+ {
2
+ "name": "@sqd-pipes/delta-db",
3
+ "version": "0.0.1-alpha.0",
4
+ "description": "Embedded rollback-aware computation engine for blockchain data",
5
+ "main": "src/index.js",
6
+ "types": "src/index.d.ts",
7
+ "napi": {
8
+ "name": "delta-db",
9
+ "triples": {
10
+ "defaults": true
11
+ }
12
+ },
13
+ "scripts": {
14
+ "build": "napi build --cargo-cwd ../.. --features napi --release",
15
+ "build:debug": "napi build --cargo-cwd ../.. --features napi",
16
+ "test": "vitest run",
17
+ "test:watch": "vitest"
18
+ },
19
+ "dependencies": {
20
+ "@subsquid/pipes": "^0.1.0-beta.16",
21
+ "@msgpack/msgpack": "^3.0.0"
22
+ },
23
+ "devDependencies": {
24
+ "@napi-rs/cli": "^2.18.0",
25
+ "vitest": "^3.0.0"
26
+ },
27
+ "files": [
28
+ "src/index.js",
29
+ "src/index.d.ts",
30
+ "src/delta-db.node"
31
+ ],
32
+ "license": "MIT"
33
+ }
Binary file
package/src/index.d.ts ADDED
@@ -0,0 +1,80 @@
1
+ /** Configuration for opening a DeltaDb instance. */
2
+ export interface DeltaDbConfig {
3
+ /** SQL schema definition string. */
4
+ schema: string
5
+ /**
6
+ * Path to RocksDB data directory for persistence.
7
+ * When omitted, uses in-memory storage (data lost on restart).
8
+ */
9
+ dataDir?: string
10
+ /** Maximum buffer size before backpressure (default: 10000). */
11
+ maxBufferSize?: number
12
+ }
13
+
14
+ /** Block cursor: number + hash. */
15
+ export interface DeltaDbCursor {
16
+ number: number
17
+ hash: string
18
+ }
19
+
20
+ /** A single delta record. */
21
+ export interface DeltaRecord {
22
+ table: string
23
+ operation: string
24
+ key: Record<string, any>
25
+ values: Record<string, any>
26
+ prevValues?: Record<string, any> | null
27
+ }
28
+
29
+ /** A batch of delta records. */
30
+ export interface DeltaBatch {
31
+ sequence: number
32
+ finalizedHead?: DeltaDbCursor | null
33
+ latestHead?: DeltaDbCursor | null
34
+ records: Array<DeltaRecord>
35
+ }
36
+
37
+ /** Input for the atomic `ingest()` method. */
38
+ export interface IngestInput {
39
+ /** Table name → rows. Rows must contain `block_number`. */
40
+ data: Record<string, Array<Record<string, any>>>
41
+ /** Unfinalized blocks with hashes for fork resolution. */
42
+ rollbackChain?: Array<DeltaDbCursor>
43
+ /** Finalized head cursor — both number and hash stored. */
44
+ finalizedHead: DeltaDbCursor
45
+ }
46
+
47
+ /** Delta DB — embedded rollback-aware computation engine. */
48
+ export declare class DeltaDb {
49
+ /** Open a new DeltaDb instance. */
50
+ static open(config: DeltaDbConfig): DeltaDb
51
+ /**
52
+ * Process a batch of rows for a raw table.
53
+ * Returns true if backpressure should be applied.
54
+ */
55
+ processBatch(table: string, block: number, rows: Array<Record<string, any>>): boolean
56
+ /** Roll back all state after fork_point. */
57
+ rollback(forkPoint: number): void
58
+ /** Finalize all state up to and including the given block. */
59
+ finalize(block: number): void
60
+ /**
61
+ * Atomic ingest: process all tables, store rollback chain, finalize, flush.
62
+ * Returns the delta batch, or null if no records produced.
63
+ */
64
+ ingest(input: IngestInput): DeltaBatch | null
65
+ /**
66
+ * Find the common ancestor between our state and the Portal's chain.
67
+ * Returns the matching block cursor, or null if no common ancestor found.
68
+ */
69
+ resolveForkCursor(previousBlocks: Array<DeltaDbCursor>): DeltaDbCursor | null
70
+ /** Flush buffered deltas into a batch. Returns null if no pending records. */
71
+ flush(): DeltaBatch | null
72
+ /** Acknowledge a flushed batch by sequence number. */
73
+ ack(sequence: number): void
74
+ /** Number of pending (unflushed) delta records. */
75
+ get pendingCount(): number
76
+ /** Whether backpressure should be applied. */
77
+ get isBackpressured(): boolean
78
+ /** Current cursor: latest processed block + hash. Null if no blocks processed. */
79
+ get cursor(): DeltaDbCursor | null
80
+ }
package/src/index.js ADDED
@@ -0,0 +1,85 @@
1
+ // @ts-ignore
2
+ const { DeltaDb: NativeDeltaDb } = require('./delta-db.node')
3
+ const { encode, decode } = require('@msgpack/msgpack')
4
+
5
/**
 * Delta DB — embedded rollback-aware computation engine.
 *
 * Thin wrapper over the native binding: row data crosses the N-API
 * boundary as msgpack-encoded Buffers in both directions.
 */
class DeltaDb {
  /** @type {InstanceType<typeof NativeDeltaDb>} */
  #inner

  /** @param {InstanceType<typeof NativeDeltaDb>} inner */
  constructor(inner) {
    this.#inner = inner
  }

  /** Open a new DeltaDb instance. */
  static open(config) {
    const native = NativeDeltaDb.open(config)
    return new DeltaDb(native)
  }

  /**
   * Process a batch of rows for a raw table.
   * Returns true if backpressure should be applied.
   */
  processBatch(table, block, rows) {
    const payload = Buffer.from(encode(rows))
    return this.#inner.processBatch(table, block, payload)
  }

  /** Roll back all state after fork_point. */
  rollback(forkPoint) {
    this.#inner.rollback(forkPoint)
  }

  /** Finalize all state up to and including the given block. */
  finalize(block) {
    this.#inner.finalize(block)
  }

  /**
   * Atomic ingest: process all tables, store rollback chain, finalize, flush.
   * Returns the delta batch, or null if no records produced.
   */
  ingest(input) {
    const encodedData = Buffer.from(encode(input.data))
    const result = this.#inner.ingest({
      data: encodedData,
      rollbackChain: input.rollbackChain,
      finalizedHead: input.finalizedHead,
    })
    if (!result) {
      return null
    }
    return decode(result)
  }

  /**
   * Find the common ancestor between our state and the Portal's chain.
   * Returns the matching block cursor, or null if no common ancestor found.
   */
  resolveForkCursor(previousBlocks) {
    return this.#inner.resolveForkCursor(previousBlocks)
  }

  /** Flush buffered deltas into a batch. Returns null if no pending records. */
  flush() {
    const result = this.#inner.flush()
    if (!result) {
      return null
    }
    return decode(result)
  }

  /** Acknowledge a flushed batch by sequence number. */
  ack(sequence) {
    this.#inner.ack(sequence)
  }

  /** Number of pending (unflushed) delta records. */
  get pendingCount() {
    return this.#inner.pendingCount
  }

  /** Whether backpressure should be applied. */
  get isBackpressured() {
    return this.#inner.isBackpressured
  }

  /** Current cursor: latest processed block + hash. Null if no blocks processed. */
  get cursor() {
    return this.#inner.cursor
  }
}

module.exports.DeltaDb = DeltaDb