tonutils-2.0.1b3-py3-none-any.whl → tonutils-2.0.1b4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tonutils/__meta__.py +1 -1
- tonutils/cli.py +111 -0
- tonutils/clients/__init__.py +4 -4
- tonutils/clients/adnl/__init__.py +4 -4
- tonutils/clients/adnl/balancer.py +58 -58
- tonutils/clients/adnl/client.py +20 -20
- tonutils/clients/adnl/provider/config.py +13 -8
- tonutils/clients/adnl/provider/provider.py +39 -42
- tonutils/clients/adnl/provider/transport.py +30 -25
- tonutils/exceptions.py +41 -31
- tonutils/tonconnect/__init__.py +0 -0
- tonutils/tools/__init__.py +6 -0
- tonutils/tools/block_scanner/__init__.py +26 -0
- tonutils/tools/block_scanner/annotations.py +23 -0
- tonutils/tools/block_scanner/dispatcher.py +141 -0
- tonutils/tools/block_scanner/events.py +31 -0
- tonutils/tools/block_scanner/scanner.py +315 -0
- tonutils/tools/block_scanner/traversal.py +96 -0
- tonutils/tools/block_scanner/where.py +151 -0
- tonutils/tools/status_monitor/__init__.py +3 -0
- tonutils/tools/status_monitor/console.py +157 -0
- tonutils/tools/status_monitor/models.py +27 -0
- tonutils/tools/status_monitor/monitor.py +295 -0
- tonutils/types.py +12 -4
- {tonutils-2.0.1b3.dist-info → tonutils-2.0.1b4.dist-info}/METADATA +2 -5
- {tonutils-2.0.1b3.dist-info → tonutils-2.0.1b4.dist-info}/RECORD +30 -15
- tonutils-2.0.1b4.dist-info/entry_points.txt +2 -0
- {tonutils-2.0.1b3.dist-info → tonutils-2.0.1b4.dist-info}/WHEEL +0 -0
- {tonutils-2.0.1b3.dist-info → tonutils-2.0.1b4.dist-info}/licenses/LICENSE +0 -0
- {tonutils-2.0.1b3.dist-info → tonutils-2.0.1b4.dist-info}/top_level.txt +0 -0
tonutils/tools/block_scanner/dispatcher.py
@@ -0,0 +1,141 @@
```python
import asyncio
import inspect
import traceback
import typing as t

from tonutils.tools.block_scanner.annotations import (
    AnyHandler,
    AnyWhere,
    Decorator,
    Handler,
    HandlerEntry,
    TEvent,
    Where,
)
from tonutils.tools.block_scanner.events import EventBase


class EventDispatcher:
    """Dispatches events to registered handlers asynchronously."""

    def __init__(self, max_concurrency: int = 1000) -> None:
        """
        Initialize EventDispatcher.

        :param max_concurrency: maximum number of concurrent handler tasks.
        """
        self._handlers: t.Dict[t.Type[EventBase], t.List[HandlerEntry]] = {}
        self._sem = asyncio.Semaphore(max(1, max_concurrency))
        self._tasks: t.Set[asyncio.Task[None]] = set()
        self._closed = False

    def register(
        self,
        event_type: t.Type[TEvent],
        handler: Handler[TEvent],
        *,
        where: t.Optional[Where[TEvent]] = None,
    ) -> None:
        """
        Register a handler for a specific event type.

        :param event_type: subclass of EventBase to handle.
        :param handler: callable receiving the event.
        :param where: optional filter predicate. Handler is invoked only if predicate returns True.
        """
        if not callable(handler):
            raise TypeError("handler must be callable")

        entry: HandlerEntry = (
            t.cast(AnyHandler, handler),
            t.cast(t.Optional[AnyWhere], where),
        )
        self._handlers.setdefault(event_type, []).append(entry)

    def on(
        self,
        event_type: t.Type[TEvent],
        *,
        where: t.Optional[Where[TEvent]] = None,
    ) -> Decorator[TEvent]:
        """
        Decorator to register a handler for an event type.

        :param event_type: event class to handle.
        :param where: optional filter predicate.
        :return: Decorator that registers the handler.
        """

        def decorator(fn: Handler[TEvent]) -> Handler[TEvent]:
            self.register(event_type=event_type, handler=fn, where=where)
            return fn

        return decorator

    def _iter_handlers(self, event: EventBase) -> t.Sequence[HandlerEntry]:
        """Return all handlers matching the type of `event`."""
        out: t.List[HandlerEntry] = []
        for tp in type(event).mro():
            if tp is EventBase:
                break
            entries = self._handlers.get(t.cast(t.Type[EventBase], tp))
            if entries:
                out.extend(entries)
        return out

    def _on_task_done(self, task: asyncio.Task[None]) -> None:
        """Callback to handle task completion and print exceptions."""
        self._tasks.discard(task)
        try:
            exc = task.exception()
        except asyncio.CancelledError:
            return
        if exc is not None:
            traceback.print_exception(type(exc), exc, exc.__traceback__)

    async def _run_task(
        self,
        handler: AnyHandler,
        event: EventBase,
        where: t.Optional[AnyWhere] = None,
    ) -> None:
        """
        Run a single handler task with optional 'where' filtering.

        :param handler: async callable to execute.
        :param event: event instance to pass.
        :param where: optional predicate, skip handler if False.
        """
        async with self._sem:
            if where is not None:
                result = where(event)
                if inspect.isawaitable(result):
                    result = await result
                if not result:
                    return
            await handler(event)

    def emit(self, event: EventBase) -> None:
        """
        Emit an event to all matching handlers.

        Handlers are executed asynchronously.
        """
        if self._closed:
            return

        for handler, where in self._iter_handlers(event):
            task = asyncio.create_task(self._run_task(handler, event, where))
            self._tasks.add(task)
            task.add_done_callback(self._on_task_done)

    async def aclose(self) -> None:
        """
        Close the dispatcher and wait for all running handler tasks.

        After calling, no new events will be dispatched.
        """
        self._closed = True
        if self._tasks:
            await asyncio.gather(*self._tasks, return_exceptions=True)
        self._tasks.clear()
```
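For orientation, here is a minimal sketch of driving the dispatcher directly. The filter and handler below are illustrative; within the package, events are normally constructed and emitted by BlockScanner rather than by hand.

```python
import asyncio

from tonutils.tools.block_scanner.dispatcher import EventDispatcher
from tonutils.tools.block_scanner.events import BlockEvent


async def main() -> None:
    dispatcher = EventDispatcher(max_concurrency=100)

    # Decorator registration with a filter: the handler runs only when the
    # predicate is truthy; sync and async predicates are both accepted.
    @dispatcher.on(BlockEvent, where=lambda e: e.block.workchain == 0)
    async def handle_basechain_block(event: BlockEvent) -> None:
        print("basechain block", event.block.seqno)

    # emit() is synchronous fire-and-forget: it schedules one task per
    # matching handler; aclose() then waits for everything still in flight.
    await dispatcher.aclose()


asyncio.run(main())
```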
tonutils/tools/block_scanner/events.py
@@ -0,0 +1,31 @@
```python
import typing as t
from dataclasses import dataclass, field

from pytoniq_core import Transaction
from pytoniq_core.tl import BlockIdExt

from tonutils.clients import LiteBalancer, LiteClient


@dataclass(frozen=True, slots=True)
class EventBase:
    client: t.Union[LiteBalancer, LiteClient]
    mc_block: BlockIdExt
    context: t.Dict[str, t.Any]


@dataclass(frozen=True, slots=True)
class BlockEvent(EventBase):
    block: BlockIdExt


@dataclass(frozen=True, slots=True)
class TransactionEvent(EventBase):
    block: BlockIdExt
    transaction: Transaction


@dataclass(frozen=True, slots=True)
class TransactionsEvent(EventBase):
    block: BlockIdExt
    transactions: t.List[Transaction] = field(default_factory=list)
```
tonutils/tools/block_scanner/scanner.py
@@ -0,0 +1,315 @@
```python
import asyncio
import typing as t
from dataclasses import dataclass

from pytoniq_core import Transaction
from pytoniq_core.tl import BlockIdExt

from tonutils.clients import LiteBalancer, LiteClient
from tonutils.tools.block_scanner.annotations import (
    BlockWhere,
    Decorator,
    Handler,
    TransactionWhere,
    TransactionsWhere,
)
from tonutils.tools.block_scanner.dispatcher import EventDispatcher
from tonutils.tools.block_scanner.events import (
    BlockEvent,
    TransactionEvent,
    TransactionsEvent,
)
from tonutils.tools.block_scanner.traversal import ShardTraversal
from tonutils.types import WorkchainID, MASTERCHAIN_SHARD


@dataclass(slots=True)
class _ScanState:
    """Internal scanner state per masterchain block."""

    mc_block: BlockIdExt
    shards_seqno: t.Dict[t.Tuple[int, int], int]


class BlockScanner:
    """Asynchronous scanner for TON blockchain."""

    def __init__(
        self,
        *,
        client: t.Union[LiteBalancer, LiteClient],
        poll_interval: float = 0.1,
        include_transactions: bool = True,
        max_concurrency: int = 1000,
        **context: t.Any,
    ) -> None:
        """
        Initialize a BlockScanner.

        :param client: LiteClient or LiteBalancer instance for blockchain access.
        :param poll_interval: Interval in seconds to poll for new masterchain blocks.
        :param include_transactions: If True, emit TransactionEvent and TransactionsEvent.
        :param max_concurrency: Maximum number of concurrent event handler tasks.
        :param context: Additional key/value data passed to all emitted events.
        """
        self._client = client
        self._context = dict(context)
        self._poll_interval = poll_interval
        self._include_transactions = include_transactions

        self._traversal = ShardTraversal()
        self._dispatcher = EventDispatcher(max_concurrency)

        self._stop_event = asyncio.Event()
        self._running = False

    @t.overload
    def register(
        self,
        event_type: t.Type[BlockEvent],
        handler: Handler[BlockEvent],
        *,
        where: t.Optional[BlockWhere] = None,
    ) -> None: ...

    @t.overload
    def register(
        self,
        event_type: t.Type[TransactionEvent],
        handler: Handler[TransactionEvent],
        *,
        where: t.Optional[TransactionWhere] = None,
    ) -> None: ...

    @t.overload
    def register(
        self,
        event_type: t.Type[TransactionsEvent],
        handler: Handler[TransactionsEvent],
        *,
        where: t.Optional[TransactionsWhere] = None,
    ) -> None: ...

    def register(
        self,
        event_type: t.Any,
        handler: t.Any,
        *,
        where: t.Any = None,
    ) -> None:
        """Register a handler for an event type with optional filter."""
        self._dispatcher.register(event_type, handler, where=where)

    def on_block(
        self,
        where: t.Optional[BlockWhere] = None,
    ) -> Decorator[BlockEvent]:
        """Decorator for block event handlers."""
        return self._dispatcher.on(BlockEvent, where=where)

    def on_transaction(
        self,
        where: t.Optional[TransactionWhere] = None,
    ) -> Decorator[TransactionEvent]:
        """Decorator for transaction event handlers."""
        return self._dispatcher.on(TransactionEvent, where=where)

    def on_transactions(
        self,
        where: t.Optional[TransactionsWhere] = None,
    ) -> Decorator[TransactionsEvent]:
        """Decorator for batch transaction event handlers."""
        return self._dispatcher.on(TransactionsEvent, where=where)

    def _get_last_mc_block(self) -> BlockIdExt:
        """Return last masterchain block."""
        return self._client.provider.last_mc_block

    async def _lookup_mc_block(
        self,
        seqno: t.Optional[int] = None,
        lt: t.Optional[int] = None,
        utime: t.Optional[int] = None,
    ) -> BlockIdExt:
        """Lookup masterchain block by seqno, lt, or utime."""
        mc_block, _info = await self._client.lookup_block(
            workchain=WorkchainID.MASTERCHAIN,
            shard=MASTERCHAIN_SHARD,
            seqno=seqno,
            lt=lt,
            utime=utime,
        )
        return mc_block

    async def _init_state(
        self,
        seqno: t.Optional[int] = None,
        lt: t.Optional[int] = None,
        utime: t.Optional[int] = None,
    ) -> _ScanState:
        """Initialize scanning state."""
        if seqno is None and lt is None and utime is None:
            mc_block = self._get_last_mc_block()
        else:
            mc_block = await self._lookup_mc_block(seqno=seqno, lt=lt, utime=utime)

        if mc_block.seqno > 0:
            prev_mc = await self._lookup_mc_block(seqno=mc_block.seqno - 1)
        else:
            prev_mc = mc_block

        shards_seqno: t.Dict[t.Tuple[int, int], int] = {}
        for shard in await self._client.get_all_shards_info(prev_mc):
            shards_seqno[self._traversal.shard_key(shard)] = shard.seqno

        return _ScanState(mc_block=mc_block, shards_seqno=shards_seqno)

    def _ensure_running(self) -> None:
        """Raise CancelledError if scanner was stopped."""
        if self._stop_event.is_set():
            raise asyncio.CancelledError("Block scanner stopped")

    async def _collect_blocks(
        self,
        mc_block: BlockIdExt,
        shards_seqno: t.Dict[t.Tuple[int, int], int],
    ) -> t.List[BlockIdExt]:
        """Collect all unseen shard blocks for a masterchain block."""
        shards = await self._client.get_all_shards_info(mc_block)

        blocks: t.List[BlockIdExt] = []
        for shard_tip in shards:
            blocks.extend(
                await self._traversal.walk_unseen(
                    root=shard_tip,
                    seen_seqno=shards_seqno,
                    get_header=self._client.get_block_header,
                )
            )

        blocks.sort(key=lambda b: (b.workchain, b.shard, b.seqno))
        return blocks

    def _emit_block(self, mc_block: BlockIdExt, block: BlockIdExt) -> None:
        """Emit block event."""
        self._dispatcher.emit(
            BlockEvent(
                mc_block=mc_block,
                client=self._client,
                context=self._context,
                block=block,
            )
        )

    def _emit_transactions(
        self,
        mc_block: BlockIdExt,
        block: BlockIdExt,
        transactions: t.List[Transaction],
    ) -> None:
        """Emit batch transactions event."""
        self._dispatcher.emit(
            TransactionsEvent(
                mc_block=mc_block,
                client=self._client,
                context=self._context,
                block=block,
                transactions=transactions,
            )
        )

    def _emit_transaction(
        self,
        mc_block: BlockIdExt,
        block: BlockIdExt,
        transaction: Transaction,
    ) -> None:
        """Emit single transaction event."""
        self._dispatcher.emit(
            TransactionEvent(
                mc_block=mc_block,
                client=self._client,
                context=self._context,
                block=block,
                transaction=transaction,
            )
        )

    async def _handle_block(
        self,
        mc_block: BlockIdExt,
        block: BlockIdExt,
        shards_seqno: t.Dict[t.Tuple[int, int], int],
    ) -> None:
        """Process shard block and emit events for block + transactions."""
        self._ensure_running()

        shards_seqno[self._traversal.shard_key(block)] = block.seqno
        self._emit_block(mc_block, block)

        if not self._include_transactions:
            return

        transactions = await self._client.get_block_transactions_ext(block)
        self._emit_transactions(mc_block, block, transactions)

        for transaction in transactions:
            self._ensure_running()
            self._emit_transaction(mc_block, block, transaction)

    async def _wait_next_mc_block(self, current: BlockIdExt) -> BlockIdExt:
        """Wait for next masterchain block, polling until available."""
        next_seqno = current.seqno + 1

        while True:
            self._ensure_running()
            last_mc_block = self._get_last_mc_block()

            if next_seqno <= last_mc_block.seqno:
                if next_seqno == last_mc_block.seqno:
                    return last_mc_block
                return await self._lookup_mc_block(seqno=next_seqno)

            await asyncio.sleep(self._poll_interval)

    async def start(
        self,
        from_seqno: t.Optional[int] = None,
        from_lt: t.Optional[int] = None,
        from_utime: t.Optional[int] = None,
    ) -> None:
        """
        Start scanning from the specified point.

        :param from_seqno: start from specific masterchain sequence number.
        :param from_lt: start from specific logical time (LT) of a block.
        :param from_utime: start from specific Unix timestamp.
        """
        if self._running:
            raise RuntimeError("BlockScanner is already running")

        self._running = True
        self._stop_event.clear()

        state = await self._init_state(
            seqno=from_seqno,
            lt=from_lt,
            utime=from_utime,
        )

        try:
            while not self._stop_event.is_set():
                blocks = await self._collect_blocks(
                    mc_block=state.mc_block,
                    shards_seqno=state.shards_seqno,
                )
                for block in blocks:
                    await self._handle_block(state.mc_block, block, state.shards_seqno)
                state.mc_block = await self._wait_next_mc_block(state.mc_block)
        finally:
            await self._dispatcher.aclose()
            self._running = False

    async def stop(self) -> None:
        """Stop scanning."""
        self._stop_event.set()
```
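Putting the pieces together, a hedged usage sketch of the public API added in this release. Client construction is elided, and the `tag` context key is purely illustrative; the shutdown handling assumes that `stop()` surfaces as a CancelledError inside the scan loop, per `_ensure_running` above.

```python
import asyncio
import contextlib
import typing as t

from tonutils.clients import LiteBalancer, LiteClient
from tonutils.tools.block_scanner.events import TransactionEvent
from tonutils.tools.block_scanner.scanner import BlockScanner


async def main(client: t.Union[LiteBalancer, LiteClient]) -> None:
    # Extra keyword arguments become the `context` dict on every event.
    scanner = BlockScanner(client=client, include_transactions=True, tag="demo")

    @scanner.on_transaction()
    async def on_tx(event: TransactionEvent) -> None:
        print(event.block.seqno, event.context["tag"])

    # start() loops until stop() is requested; stopping raises
    # CancelledError inside the scan loop, so suppress it on shutdown.
    task = asyncio.create_task(scanner.start())
    await asyncio.sleep(30)
    await scanner.stop()
    with contextlib.suppress(asyncio.CancelledError):
        await task
```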
tonutils/tools/block_scanner/traversal.py
@@ -0,0 +1,96 @@
```python
import typing as t

from pytoniq_core.tl import BlockIdExt
from pytoniq_core.tlb.block import ExtBlkRef


class ShardTraversal:

    @staticmethod
    def shard_key(blk: BlockIdExt) -> t.Tuple[int, int]:
        return blk.workchain, blk.shard

    @staticmethod
    def simulate_overflow(x: int) -> int:
        return (x + 2**63) % 2**64 - 2**63

    @staticmethod
    def lower_bit64(num: int) -> int:
        return num & (~num + 1)

    def get_child_shard(self, shard: int, *, left: bool) -> int:
        x = self.lower_bit64(shard) >> 1
        if left:
            return self.simulate_overflow(shard - x)
        return self.simulate_overflow(shard + x)

    def get_parent_shard(self, shard: int) -> int:
        x = self.lower_bit64(shard)
        return self.simulate_overflow((shard - x) | (x << 1))

    async def walk_unseen(
        self,
        *,
        root: BlockIdExt,
        seen_seqno: t.Dict[t.Tuple[int, int], int],
        get_header: t.Callable[[BlockIdExt], t.Awaitable[t.Any]],
    ) -> t.List[BlockIdExt]:
        out: t.List[BlockIdExt] = []
        stack: t.List[BlockIdExt] = [root]
        post: t.List[BlockIdExt] = []

        while stack:
            blk = stack.pop()
            key = self.shard_key(blk)
            if seen_seqno.get(key, -1) >= blk.seqno:
                continue

            post.append(blk)
            _, header = await get_header(blk)
            prev_ref = header.info.prev_ref

            if prev_ref.type_ == "prev_blk_info":
                prev: ExtBlkRef = prev_ref.prev
                prev_shard = (
                    self.get_parent_shard(blk.shard)
                    if header.info.after_split
                    else blk.shard
                )
                stack.append(
                    BlockIdExt(
                        workchain=blk.workchain,
                        shard=prev_shard,
                        seqno=prev.seqno,
                        root_hash=prev.root_hash,
                        file_hash=prev.file_hash,
                    )
                )

            else:
                prev1, prev2 = prev_ref.prev1, prev_ref.prev2
                stack.append(
                    BlockIdExt(
                        workchain=blk.workchain,
                        shard=self.get_child_shard(blk.shard, left=True),
                        seqno=prev1.seqno,
                        root_hash=prev1.root_hash,
                        file_hash=prev1.file_hash,
                    )
                )
                stack.append(
                    BlockIdExt(
                        workchain=blk.workchain,
                        shard=self.get_child_shard(blk.shard, left=False),
                        seqno=prev2.seqno,
                        root_hash=prev2.root_hash,
                        file_hash=prev2.file_hash,
                    )
                )

        for blk in reversed(post):
            key = self.shard_key(blk)
            if seen_seqno.get(key, -1) >= blk.seqno:
                continue
            out.append(blk)

        return out
```
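The shard arithmetic follows TON's shard-identifier scheme: a shard id is a 64-bit value whose lowest set bit marks the prefix boundary, a split moves that marker one bit to the right, and a merge moves it back. A quick worked check of the round trip using the methods above (pure integer arithmetic, no network access; the `as_u64` helper is only for display):

```python
from tonutils.tools.block_scanner.traversal import ShardTraversal

tr = ShardTraversal()

def as_u64(x: int) -> str:
    # Render the signed 64-bit shard id in its usual unsigned hex form.
    return hex(x & 0xFFFFFFFFFFFFFFFF)

# The root shard of a workchain, in the signed representation used above.
root = tr.simulate_overflow(0x8000000000000000)

left = tr.get_child_shard(root, left=True)
right = tr.get_child_shard(root, left=False)
print(as_u64(left), as_u64(right))  # 0x4000000000000000 0xc000000000000000

# Merging either child yields the root again.
assert tr.get_parent_shard(left) == root
assert tr.get_parent_shard(right) == root
```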