@dignetwork/chia-block-listener 0.1.18 → 0.1.19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. package/.github/workflows/ci.yml +74 -0
  2. package/.github/workflows/release.yml +227 -0
  3. package/Cargo.toml +18 -14
  4. package/README.md +117 -0
  5. package/crate/chia-block-listener-napi/Cargo.toml +23 -0
  6. package/{src → crate/chia-block-listener-napi/src}/block_parser_napi.rs +1 -7
  7. package/{src → crate/chia-block-listener-napi/src}/dns_discovery_napi.rs +17 -38
  8. package/crate/chia-block-listener-napi/src/event_emitter.rs +582 -0
  9. package/crate/chia-block-listener-napi/src/lib.rs +30 -0
  10. package/{src → crate/chia-block-listener-napi/src}/peer_pool_napi.rs +122 -48
  11. package/crate/chia-generator-parser/Cargo.toml +1 -5
  12. package/crate/chia-generator-parser/examples/basic_usage.rs +2 -2
  13. package/crate/chia-generator-parser/src/lib.rs +7 -0
  14. package/crate/chia-generator-parser/src/parser.rs +148 -26
  15. package/crate/chia-generator-parser/src/types.rs +23 -3
  16. package/crate/{dns-discovery → dig-dns-discovery}/Cargo.toml +17 -15
  17. package/crate/{dns-discovery → dig-dns-discovery}/README.md +150 -150
  18. package/crate/{dns-discovery → dig-dns-discovery}/examples/discover_peers.rs +8 -9
  19. package/crate/{dns-discovery → dig-dns-discovery}/package-lock.json +2382 -2382
  20. package/crate/{dns-discovery → dig-dns-discovery}/src/lib.rs +10 -10
  21. package/index.d.ts +19 -24
  22. package/index.js +2 -1
  23. package/npm/darwin-arm64/package.json +1 -1
  24. package/npm/darwin-x64/package.json +1 -1
  25. package/npm/linux-arm64-gnu/package.json +1 -1
  26. package/npm/linux-x64-gnu/package.json +1 -1
  27. package/npm/win32-x64-msvc/package.json +1 -1
  28. package/package.json +11 -11
  29. package/src/block_listener.rs +268 -0
  30. package/src/dns_discovery.rs +66 -0
  31. package/src/error.rs +0 -6
  32. package/src/lib.rs +12 -16
  33. package/src/peer.rs +47 -21
  34. package/src/peer_pool.rs +326 -104
  35. package/src/protocol.rs +5 -6
  36. package/src/types.rs +83 -0
  37. package/.github/workflows/CI.yml +0 -340
  38. package/crate/chia-generator-parser/examples/production_test.rs +0 -147
  39. package/src/event_emitter.rs +0 -848
  40. package/{build.rs → crate/chia-block-listener-napi/build.rs} +0 -0
@@ -0,0 +1,74 @@
1
+ name: CI
2
+
3
+ on:
4
+ push:
5
+ branches: [ main ]
6
+ pull_request:
7
+ branches: [ main ]
8
+
9
+ jobs:
10
+ rust-checks:
11
+ name: Rust checks (fmt, clippy)
12
+ runs-on: ubuntu-22.04
13
+ steps:
14
+ - name: Checkout
15
+ uses: actions/checkout@v4
16
+
17
+ - name: Setup Rust
18
+ uses: dtolnay/rust-toolchain@stable
19
+
20
+ - name: Cache cargo
21
+ uses: actions/cache@v4
22
+ with:
23
+ path: |
24
+ ~/.cargo/registry
25
+ ~/.cargo/git
26
+ target
27
+ key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
28
+
29
+ - name: Format check
30
+ run: cargo fmt --all -- --check
31
+
32
+ - name: Clippy
33
+ run: cargo clippy --workspace --all-targets -- -D warnings
34
+
35
+ build:
36
+ name: Build (${{ matrix.os }})
37
+ needs: rust-checks
38
+ runs-on: ${{ matrix.os }}
39
+ strategy:
40
+ fail-fast: false
41
+ matrix:
42
+ os: [ubuntu-22.04, macos-14, windows-latest]
43
+ steps:
44
+ - name: Checkout
45
+ uses: actions/checkout@v4
46
+
47
+ - name: Setup Node.js
48
+ uses: actions/setup-node@v4
49
+ with:
50
+ node-version: 22
51
+
52
+ - name: Setup Rust
53
+ uses: dtolnay/rust-toolchain@stable
54
+
55
+ - name: Cache cargo
56
+ uses: actions/cache@v4
57
+ with:
58
+ path: |
59
+ ~/.cargo/registry
60
+ ~/.cargo/git
61
+ target
62
+ key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
63
+
64
+ - name: Install yarn
65
+ run: corepack enable && corepack prepare yarn@1.22.22 --activate
66
+
67
+ - name: Install @napi-rs/cli
68
+ run: yarn global add @napi-rs/cli@^2.18.4
69
+
70
+ - name: Install deps
71
+ run: yarn install --frozen-lockfile
72
+
73
+ - name: Build NAPI addon
74
+ run: yarn build
@@ -0,0 +1,227 @@
1
+ name: Release
2
+
3
+ on:
4
+ push:
5
+ tags:
6
+ - "v*"
7
+
8
+ jobs:
9
+ publish-crates:
10
+ name: Publish crates to crates.io
11
+ runs-on: ubuntu-22.04
12
+ steps:
13
+ - name: Checkout
14
+ uses: actions/checkout@v4
15
+
16
+ - name: Setup Rust
17
+ uses: dtolnay/rust-toolchain@stable
18
+ with:
19
+ components: rustfmt, clippy
20
+
21
+ - name: Cache cargo
22
+ uses: actions/cache@v4
23
+ with:
24
+ path: |
25
+ ~/.cargo/registry
26
+ ~/.cargo/git
27
+ target
28
+ key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
29
+
30
+ - name: Install curl + python3
31
+ run: |
32
+ sudo apt-get update
33
+ sudo apt-get install -y curl python3
34
+
35
+ # Enforce the same quality gates as CI for the crates you publish.
36
+ - name: Rust fmt check (published crates)
37
+ run: cargo fmt --all -- --check
38
+
39
+ - name: Rust clippy (published crates)
40
+ run: |
41
+ cargo clippy \
42
+ -p dig-dns-discovery \
43
+ -p chia-generator-parser \
44
+ -p chia-block-listener \
45
+ --all-targets \
46
+ -- -D warnings
47
+
48
+ - name: Cargo login
49
+ env:
50
+ CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
51
+ run: cargo login "$CARGO_REGISTRY_TOKEN"
52
+
53
+ - name: Publish dig-dns-discovery (dry-run + if needed)
54
+ env:
55
+ CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
56
+ working-directory: ./crate/dig-dns-discovery
57
+ shell: bash
58
+ run: |
59
+ set -euo pipefail
60
+ CRATE="dig-dns-discovery"
61
+ VER="$(cargo metadata --no-deps --format-version 1 | python3 -c 'import sys, json; print(json.load(sys.stdin)["packages"][0]["version"])')"
62
+ echo "Crate: ${CRATE} Version: ${VER}"
63
+
64
+ # Validate publish packaging/build without uploading
65
+ cargo publish --dry-run
66
+
67
+ CODE="$(curl -s -o /dev/null -w "%{http_code}" "https://crates.io/api/v1/crates/${CRATE}/${VER}" || true)"
68
+ if [ "$CODE" = "200" ]; then
69
+ echo "Already published: ${CRATE} ${VER} (skipping upload)."
70
+ exit 0
71
+ fi
72
+
73
+ cargo publish --no-verify
74
+
75
+ - name: Publish chia-generator-parser (dry-run + if needed)
76
+ env:
77
+ CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
78
+ working-directory: ./crate/chia-generator-parser
79
+ shell: bash
80
+ run: |
81
+ set -euo pipefail
82
+ CRATE="chia-generator-parser"
83
+ VER="$(cargo metadata --no-deps --format-version 1 | python3 -c 'import sys, json; print(json.load(sys.stdin)["packages"][0]["version"])')"
84
+ echo "Crate: ${CRATE} Version: ${VER}"
85
+
86
+ cargo publish --dry-run
87
+
88
+ CODE="$(curl -s -o /dev/null -w "%{http_code}" "https://crates.io/api/v1/crates/${CRATE}/${VER}" || true)"
89
+ if [ "$CODE" = "200" ]; then
90
+ echo "Already published: ${CRATE} ${VER} (skipping upload)."
91
+ exit 0
92
+ fi
93
+
94
+ cargo publish --no-verify
95
+
96
+ # Reduce flakiness: ensure the dependency versions are visible before publishing chia-block-listener.
97
+ - name: Wait for crates.io visibility (dig-dns-discovery + chia-generator-parser)
98
+ shell: bash
99
+ run: |
100
+ set -euo pipefail
101
+ DIG_VER="$(cargo metadata --no-deps --format-version 1 | python3 -c 'import sys, json; print(json.load(sys.stdin)["packages"][0]["version"])')"
102
+ PAR_VER="$(cargo metadata --no-deps --format-version 1 | python3 -c 'import sys, json; print(json.load(sys.stdin)["packages"][0]["version"])')"
103
+
104
+ check() {
105
+ local crate="$1" ver="$2"
106
+ curl -s -o /dev/null -w "%{http_code}" "https://crates.io/api/v1/crates/${crate}/${ver}" || true
107
+ }
108
+
109
+ for i in {1..24}; do
110
+ A="$(check dig-dns-discovery "$DIG_VER")"
111
+ B="$(check chia-generator-parser "$PAR_VER")"
112
+ if [ "$A" = "200" ] && [ "$B" = "200" ]; then
113
+ echo "Deps visible on crates.io."
114
+ exit 0
115
+ fi
116
+ echo "Waiting for crates.io visibility... (${i}/24)"
117
+ sleep 5
118
+ done
119
+
120
+ echo "Timed out waiting for dependency crates to be visible on crates.io."
121
+ exit 1
122
+
123
+ - name: Publish chia-block-listener (dry-run + if needed)
124
+ env:
125
+ CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
126
+ working-directory: .
127
+ shell: bash
128
+ run: |
129
+ set -euo pipefail
130
+ CRATE="chia-block-listener"
131
+ VER="$(cargo metadata --no-deps --format-version 1 | python3 -c 'import sys, json; print(json.load(sys.stdin)["packages"][0]["version"])')"
132
+ echo "Crate: ${CRATE} Version: ${VER}"
133
+
134
+ cargo publish --dry-run
135
+
136
+ CODE="$(curl -s -o /dev/null -w "%{http_code}" "https://crates.io/api/v1/crates/${CRATE}/${VER}" || true)"
137
+ if [ "$CODE" = "200" ]; then
138
+ echo "Already published: ${CRATE} ${VER} (skipping upload)."
139
+ exit 0
140
+ fi
141
+
142
+ cargo publish --no-verify
143
+
144
+ build:
145
+ name: Build binaries (${{ matrix.os }})
146
+ needs: publish-crates
147
+ runs-on: ${{ matrix.os }}
148
+ strategy:
149
+ fail-fast: false
150
+ matrix:
151
+ os: [ubuntu-22.04, macos-14, windows-latest]
152
+ steps:
153
+ - uses: actions/checkout@v4
154
+
155
+ - name: Setup Node.js
156
+ uses: actions/setup-node@v4
157
+ with:
158
+ node-version: 22
159
+
160
+ - name: Setup Rust
161
+ uses: dtolnay/rust-toolchain@stable
162
+
163
+ - name: Cache cargo
164
+ uses: actions/cache@v4
165
+ with:
166
+ path: |
167
+ ~/.cargo/registry
168
+ ~/.cargo/git
169
+ target
170
+ key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
171
+
172
+ - name: Install yarn
173
+ run: corepack enable && corepack prepare yarn@1.22.22 --activate
174
+
175
+ - name: Install @napi-rs/cli
176
+ run: yarn global add @napi-rs/cli@^2.18.4
177
+
178
+ - name: Install deps
179
+ run: yarn install --frozen-lockfile
180
+
181
+ - name: Build artifacts
182
+ run: yarn build
183
+
184
+ - name: Upload artifact
185
+ uses: actions/upload-artifact@v4
186
+ with:
187
+ name: artifacts-${{ runner.os }}
188
+ path: |
189
+ ./*.node
190
+ npm/**/package.json
191
+ npm/**/*.node
192
+
193
+ publish:
194
+ name: Publish to npm
195
+ needs: [ publish-crates, build ]
196
+ runs-on: ubuntu-22.04
197
+ steps:
198
+ - uses: actions/checkout@v4
199
+
200
+ - name: Setup Node.js
201
+ uses: actions/setup-node@v4
202
+ with:
203
+ node-version: 22
204
+ registry-url: "https://registry.npmjs.org"
205
+
206
+ - name: Install yarn
207
+ run: corepack enable && corepack prepare yarn@1.22.22 --activate
208
+
209
+ - name: Install deps
210
+ run: yarn install --frozen-lockfile
211
+
212
+ - name: Download artifacts
213
+ uses: actions/download-artifact@v4
214
+ with:
215
+ path: artifacts
216
+ merge-multiple: true
217
+
218
+ - name: Install yarn
219
+ run: corepack enable && corepack prepare yarn@1.22.22 --activate
220
+
221
+ - name: Install deps
222
+ run: yarn install --frozen-lockfile
223
+
224
+ - name: Publish main npm package
225
+ env:
226
+ NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
227
+ run: npm publish --access public
package/Cargo.toml CHANGED
@@ -1,19 +1,16 @@
1
1
  [package]
2
2
  name = "chia-block-listener"
3
- version = "0.1.0"
3
+ version = "0.2.0"
4
4
  edition = "2021"
5
-
6
- [lib]
7
- crate-type = ["cdylib"]
5
+ description = "Event driven listener and messaging interface for Chia blockchain peers"
6
+ license = "Apache-2.0" # or "MIT" / "MIT OR Apache-2.0" etc.
8
7
 
9
8
  [dependencies]
10
- # NAPI-rs for Node.js bindings
11
- napi = { version = "2", features = ["napi9", "tokio_rt", "async"] }
12
- napi-derive = "2"
13
9
 
14
10
  # Async runtime
15
11
  tokio = { version = "1", features = ["full"] }
16
12
  futures-util = "0.3"
13
+ tokio-util = "0.7"
17
14
 
18
15
  # Chia-related dependencies
19
16
  chia-protocol = "0.26"
@@ -34,13 +31,20 @@ tracing = "0.1"
34
31
  tracing-subscriber = { version = "0.3", features = ["env-filter"] }
35
32
  sha2 = "0.10"
36
33
  dirs = "5"
37
-
38
- # Local crates
39
- chia-generator-parser = { path = "./crate/chia-generator-parser" }
40
- dns-discovery = { path = "./crate/dns-discovery" }
41
-
42
- [build-dependencies]
43
- napi-build = "2"
34
+ dig-dns-discovery = { workspace = true }
35
+ chia-generator-parser = { workspace = true}
44
36
 
45
37
  [profile.release]
46
38
  lto = true
39
+
40
+ [workspace]
41
+ members = [
42
+ ".",
43
+ "crate/chia-generator-parser",
44
+ "crate/dig-dns-discovery",
45
+ "crate/chia-block-listener-napi",
46
+ ]
47
+
48
+ [workspace.dependencies]
49
+ dig-dns-discovery = { path = "crate/dig-dns-discovery", version = "0.2.0" }
50
+ chia-generator-parser = { path = "crate/chia-generator-parser", version = "0.2.0" }
package/README.md CHANGED
@@ -68,6 +68,123 @@ process.on('SIGINT', () => {
68
68
  process.exit(0)
69
69
  })
70
70
  ```
71
+
72
+ ## Rust usage (canonical, Rust-first interface)
73
+
74
+ This repository now provides a pure Rust API suitable for use in Tokio-based applications, while the N-API adapter remains a thin layer for Node.js. The Rust-facing entry point is `BlockListener` (previously named `Listener`).
75
+
76
+ Key points of the Rust API:
77
+ - Your application owns the policy/event loop. The library only manages networking, peers, and event emission.
78
+ - Subscribe to events via a bounded broadcast channel. Delivery is best-effort; slow consumers may miss messages, which is appropriate for catch-up/skip logic.
79
+ - Structured shutdown: signal cancellation quickly and optionally wait for all internal tasks to complete.
80
+
81
+ Example:
82
+
83
+ ```rust
84
+ use chia_block_listener::{init_tracing, BlockListener, ListenerConfig};
85
+ use chia_block_listener::types::Event;
86
+ use tokio_stream::wrappers::BroadcastStream;
87
+ use tokio_stream::StreamExt;
88
+
89
+ #[tokio::main]
90
+ async fn main() -> anyhow::Result<()> {
91
+ // Optional: enable logging
92
+ init_tracing();
93
+
94
+ // Configure and create the block listener (buffer defaults to 1024)
95
+ let block_listener = BlockListener::new(ListenerConfig::default())?;
96
+
97
+ // Subscribe to events (bounded, best-effort delivery)
98
+ let rx = block_listener.subscribe();
99
+ let mut events = BroadcastStream::new(rx);
100
+
101
+ // Connect a peer
102
+ let _peer_id = block_listener.add_peer("localhost".into(), 8444, "mainnet".into()).await?;
103
+
104
+ // Application-owned policy loop: read events and act
105
+ let policy = tokio::spawn(async move {
106
+ while let Some(item) = events.next().await {
107
+ match item {
108
+ Ok(Event::PeerConnected(e)) => {
109
+ println!("connected: {} ({}:{})", e.peer_id, e.host, e.port);
110
+ }
111
+ Ok(Event::PeerDisconnected(e)) => {
112
+ println!("disconnected: {} reason={:?}", e.peer_id, e.message);
113
+ }
114
+ Ok(Event::NewPeakHeight(e)) => {
115
+ println!("new peak: old={:?} new={} from {}", e.old_peak, e.new_peak, e.peer_id);
116
+ }
117
+ Ok(Event::BlockReceived(b)) => {
118
+ println!("block {} {}", b.height, b.header_hash);
119
+ }
120
+ Err(_lagged) => {
121
+ // One or more messages were missed (slow consumer). Best-effort model: recompute current state if needed.
122
+ // For catch-up use cases, you generally only need latest peak or most recent block.
123
+ }
124
+ }
125
+ }
126
+ });
127
+
128
+ // Shutdown example (e.g., on SIGINT/SIGTERM):
129
+ // Signal fast
130
+ block_listener.shutdown().await?;
131
+ // Wait for all internal tasks to end deterministically
132
+ block_listener.shutdown_and_wait().await?;
133
+
134
+ policy.await?;
135
+ Ok(())
136
+ }
137
+ ```
138
+
139
+ ### Backpressure and delivery guarantees
140
+ - The core guarantees that parsed blocks are submitted into the internal event pipeline (no pre-broadcast drop). Blocks are enqueued with backpressure inside the pool and forwarded to the broadcast dispatcher.
141
+ - Event broadcast to subscribers uses a bounded buffer (default 1024). If a subscriber is slow, it may receive `Lagged(n)` from the stream wrapper, meaning it missed `n` events. This affects slow subscribers only and does not cause block events to be dropped before entering the broadcast.
142
+ - Informational events (peerConnected, peerDisconnected, newPeakHeight) are best-effort before broadcast and may be dropped under overload to avoid stalling networking.
143
+ - Recommended approach for “catch-up” logic: maintain your own application state keyed by height/epoch and cancel/work-skip as newer events arrive. If you need every block for historical processing, use explicit queries like `get_block_by_height`/ranges in addition to the live stream.
144
+
145
+ ### Passive WebSocket listening and multi-peer behavior
146
+ - **Listener, not poller:** Each `add_peer` establishes a dedicated streaming WebSocket reader (per peer) that passively consumes protocol messages and drives events. `NewPeakWallet` triggers a `RequestBlock` immediately; `RespondBlock` is parsed and emitted as `Event::BlockReceived` via the core event pipeline.
147
+ - **Unified event pipeline:** All live events (`PeerConnected`, `PeerDisconnected`, `NewPeakHeight`, `BlockReceived`) flow through a single core mpsc sink and are forwarded to all subscribers via `broadcast`. Blocks use `send().await` into the sink (no pre-broadcast drop); informational events are best-effort `try_send` to avoid stalling I/O.
148
+ - **Multiple peers:** You can call `add_peer` multiple times. The pool tracks each peer independently with its own streaming reader and request worker. On-demand requests (`get_block_by_height`) use round-robin with cooldown and remove unhealthy peers based on repeated failures/timeouts/protocol errors.
149
+ - **Consumption pattern:** Your app owns the policy loop—subscribe once, then react to events as they arrive. Slow subscribers may receive `Lagged(n)` from the broadcast wrapper; recompute state as needed. For guaranteed historical coverage, pair live listening with explicit `get_block_by_height` / range queries.
150
+ - **Shutdown:** `shutdown()` signals cancellation; `shutdown_and_wait()` awaits the dispatcher and all tracked peer/request tasks for deterministic teardown. `Drop` on `Listener` signals cancel without awaiting (non-blocking drop safety).
151
+
152
+ ### Auto-reconnect (DNS-driven, Rust API)
153
+ - **Opt-in via config:** Set `BlockListenerConfig { auto_reconnect: true, network_id, default_port, max_auto_reconnect_retries, .. }` when constructing `BlockListener`.
154
+ - **When it runs:** The auto-reconnect task starts on the first `subscribe()` call. It keeps one active peer for you; manual `add_peer` calls still work and can coexist.
155
+ - **How it works:**
156
+ - Uses the core DNS introducers for the configured `network_id` (default `mainnet`) and `default_port` (default `8444`).
157
+ - On startup (if no peers connected) or when all peers disconnect, it runs discovery, shuffles the results, and tries each peer in the batch until one connects.
158
+ - If a peer disconnects and none remain, discovery is rerun and peers are retried in batches up to `max_auto_reconnect_retries` (default 10 batches). Failure after all retries surfaces a best-effort `PeerDisconnected` event with an explanatory message.
159
+ - **Backpressure & events:** Once connected, live events still flow through the same sink → broadcast pipeline; block delivery guarantees and backpressure semantics remain unchanged.
160
+ - **Example:**
161
+ ```rust
162
+ use chia_block_listener::{BlockListener, BlockListenerConfig};
163
+
164
+ let config = BlockListenerConfig {
165
+ auto_reconnect: true,
166
+ network_id: "mainnet".into(),
167
+ default_port: 8444,
168
+ max_auto_reconnect_retries: 10,
169
+ buffer: 1024,
170
+ };
171
+
172
+ let listener = BlockListener::new(config)?;
173
+ let mut rx = listener.subscribe(); // kicks off auto-reconnect
174
+ ```
175
+
176
+ ### Shutdown semantics
177
+ - `shutdown()` signals cancellation and returns quickly.
178
+ - `shutdown_and_wait()` cancels and then awaits all internal tasks to finish (peer workers, request processor, dispatchers), providing deterministic shutdown.
179
+
180
+ ### Configuration
181
+ - `BlockListenerConfig` exposes:
182
+ - `buffer` (event buffer per subscriber, default 1024)
183
+ - `auto_reconnect` (enable DNS-based single-peer maintenance)
184
+ - `network_id` (e.g., `mainnet`, `testnet11`; used for DNS discovery)
185
+ - `default_port` (port used for discovered peers, default 8444)
186
+ - `max_auto_reconnect_retries` (discovery batches to attempt before surfacing an error event, default 10)
187
+ - Additional knobs (timeouts, rate limits, etc.) are centralized in the core with sensible defaults and named constants.
71
188
  ## API Reference
72
189
 
73
190
  ### ChiaBlockListener Class
@@ -0,0 +1,23 @@
1
+ [package]
2
+ name = "chia-block-listener-napi"
3
+ version = "0.2.0"
4
+ edition = "2021"
5
+
6
+ [lib]
7
+ crate-type = ["cdylib"]
8
+
9
+ [dependencies]
10
+ napi = { version = "2", features = ["napi9", "tokio_rt", "async"] }
11
+ napi-derive = "2"
12
+ tokio = { version = "1", features = ["full"] }
13
+ futures-util = "0.3"
14
+ hex = "0.4"
15
+ tracing = "0.1"
16
+ tracing-subscriber = { version = "0.3", features = ["env-filter"] }
17
+ chia-block-listener = { path = "../.." }
18
+ chia-generator-parser = { path = "../chia-generator-parser" }
19
+ chia-protocol = "0.26"
20
+ chia-traits = "0.26"
21
+
22
+ [build-dependencies]
23
+ napi-build = "2"
@@ -124,7 +124,6 @@ impl From<&GeneratorBlockInfo> for GeneratorBlockInfoJS {
124
124
 
125
125
  // Export BlockHeightInfo for TypeScript
126
126
  #[napi(object)]
127
- #[derive(Clone)]
128
127
  pub struct BlockHeightInfoJS {
129
128
  pub height: u32,
130
129
  #[napi(js_name = "isTransactionBlock")]
@@ -145,15 +144,10 @@ pub struct ChiaBlockParser {
145
144
  parser: RustBlockParser,
146
145
  }
147
146
 
148
- impl Default for ChiaBlockParser {
149
- fn default() -> Self {
150
- Self::new()
151
- }
152
- }
153
-
154
147
  #[napi]
155
148
  impl ChiaBlockParser {
156
149
  /// Create a new block parser
150
+ #[allow(clippy::new_without_default)]
157
151
  #[napi(constructor)]
158
152
  pub fn new() -> Self {
159
153
  info!("Creating new ChiaBlockParser");
@@ -1,4 +1,5 @@
1
- use dns_discovery::{DiscoveryResult, DnsDiscovery, DnsDiscoveryError, PeerAddress};
1
+ use chia_block_listener::dns_discovery::{DiscoveryResult, DnsDiscoveryError, PeerAddress};
2
+ use chia_block_listener::DnsDiscoveryClient as CoreDnsDiscoveryClient;
2
3
  use napi::bindgen_prelude::*;
3
4
  use napi_derive::napi;
4
5
  use tracing::{debug, info};
@@ -82,7 +83,7 @@ pub struct AddressResult {
82
83
 
83
84
  #[napi]
84
85
  pub struct DnsDiscoveryClient {
85
- discovery: DnsDiscovery,
86
+ core: CoreDnsDiscoveryClient,
86
87
  }
87
88
 
88
89
  #[napi]
@@ -93,14 +94,14 @@ impl DnsDiscoveryClient {
93
94
  info!("Creating new DnsDiscoveryClient");
94
95
 
95
96
  let rt = tokio::runtime::Handle::current();
96
- let discovery = rt
97
- .block_on(async { DnsDiscovery::new().await })
97
+ let core = rt
98
+ .block_on(async { CoreDnsDiscoveryClient::new().await })
98
99
  .map_err(|e| {
99
100
  let error_info = DnsDiscoveryErrorInfo::from(e);
100
101
  Error::new(Status::GenericFailure, error_info.message)
101
102
  })?;
102
103
 
103
- Ok(Self { discovery })
104
+ Ok(Self { core })
104
105
  }
105
106
 
106
107
  /// Discover peers for Chia mainnet
@@ -108,7 +109,7 @@ impl DnsDiscoveryClient {
108
109
  pub async fn discover_mainnet_peers(&self) -> Result<DiscoveryResultJS> {
109
110
  debug!("Discovering mainnet peers via DNS");
110
111
 
111
- self.discovery
112
+ self.core
112
113
  .discover_mainnet_peers()
113
114
  .await
114
115
  .map(|result| DiscoveryResultJS::from(&result))
@@ -123,7 +124,7 @@ impl DnsDiscoveryClient {
123
124
  pub async fn discover_testnet11_peers(&self) -> Result<DiscoveryResultJS> {
124
125
  debug!("Discovering testnet11 peers via DNS");
125
126
 
126
- self.discovery
127
+ self.core
127
128
  .discover_testnet11_peers()
128
129
  .await
129
130
  .map(|result| DiscoveryResultJS::from(&result))
@@ -145,10 +146,8 @@ impl DnsDiscoveryClient {
145
146
  introducers.len()
146
147
  );
147
148
 
148
- let introducer_refs: Vec<&str> = introducers.iter().map(|s| s.as_str()).collect();
149
-
150
- self.discovery
151
- .discover_peers(&introducer_refs, default_port)
149
+ self.core
150
+ .discover_peers(&introducers, default_port)
152
151
  .await
153
152
  .map(|result| DiscoveryResultJS::from(&result))
154
153
  .map_err(|e| {
@@ -162,8 +161,8 @@ impl DnsDiscoveryClient {
162
161
  pub async fn resolve_ipv4(&self, hostname: String) -> Result<AddressResult> {
163
162
  debug!("Resolving IPv4 addresses for {}", hostname);
164
163
 
165
- self.discovery
166
- .resolve_ipv4(&hostname)
164
+ self.core
165
+ .resolve_ipv4(hostname.as_str())
167
166
  .await
168
167
  .map(|addrs| AddressResult {
169
168
  addresses: addrs.iter().map(|addr| addr.to_string()).collect(),
@@ -180,8 +179,8 @@ impl DnsDiscoveryClient {
180
179
  pub async fn resolve_ipv6(&self, hostname: String) -> Result<AddressResult> {
181
180
  debug!("Resolving IPv6 addresses for {}", hostname);
182
181
 
183
- self.discovery
184
- .resolve_ipv6(&hostname)
182
+ self.core
183
+ .resolve_ipv6(hostname.as_str())
185
184
  .await
186
185
  .map(|addrs| AddressResult {
187
186
  addresses: addrs.iter().map(|addr| addr.to_string()).collect(),
@@ -198,30 +197,10 @@ impl DnsDiscoveryClient {
198
197
  pub async fn resolve_both(&self, hostname: String, port: u16) -> Result<DiscoveryResultJS> {
199
198
  debug!("Resolving both IPv4 and IPv6 addresses for {}", hostname);
200
199
 
201
- self.discovery
202
- .resolve_both(&hostname, port)
200
+ self.core
201
+ .resolve_both(hostname.as_str(), port)
203
202
  .await
204
203
  .map(|result| DiscoveryResultJS::from(&result))
205
- .map_err(|e| {
206
- let error_info = DnsDiscoveryErrorInfo::from(e);
207
- Error::new(Status::GenericFailure, error_info.message)
208
- })
209
- }
210
- }
211
-
212
- impl Default for DnsDiscoveryClient {
213
- fn default() -> Self {
214
- Self::new().unwrap()
215
- }
216
- }
217
-
218
- #[cfg(test)]
219
- mod tests {
220
- use super::*;
221
-
222
- #[tokio::test]
223
- async fn test_client_creation() {
224
- let client = DnsDiscoveryClient::new();
225
- assert!(client.is_ok());
204
+ .map_err(|e| Error::new(Status::GenericFailure, e.to_string()))
226
205
  }
227
206
  }