eth-portfolio-temp 0.2.12__cp313-cp313-win32.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of eth-portfolio-temp might be problematic. Click here for more details.
- eth_portfolio/__init__.py +25 -0
- eth_portfolio/_argspec.cp313-win32.pyd +0 -0
- eth_portfolio/_argspec.py +42 -0
- eth_portfolio/_cache.py +121 -0
- eth_portfolio/_config.cp313-win32.pyd +0 -0
- eth_portfolio/_config.py +4 -0
- eth_portfolio/_db/__init__.py +0 -0
- eth_portfolio/_db/decorators.py +147 -0
- eth_portfolio/_db/entities.py +311 -0
- eth_portfolio/_db/utils.py +604 -0
- eth_portfolio/_decimal.py +156 -0
- eth_portfolio/_decorators.py +84 -0
- eth_portfolio/_exceptions.py +67 -0
- eth_portfolio/_ledgers/__init__.py +0 -0
- eth_portfolio/_ledgers/address.py +938 -0
- eth_portfolio/_ledgers/portfolio.py +327 -0
- eth_portfolio/_loaders/__init__.py +33 -0
- eth_portfolio/_loaders/_nonce.cp313-win32.pyd +0 -0
- eth_portfolio/_loaders/_nonce.py +196 -0
- eth_portfolio/_loaders/balances.cp313-win32.pyd +0 -0
- eth_portfolio/_loaders/balances.py +94 -0
- eth_portfolio/_loaders/token_transfer.py +217 -0
- eth_portfolio/_loaders/transaction.py +240 -0
- eth_portfolio/_loaders/utils.cp313-win32.pyd +0 -0
- eth_portfolio/_loaders/utils.py +68 -0
- eth_portfolio/_shitcoins.cp313-win32.pyd +0 -0
- eth_portfolio/_shitcoins.py +329 -0
- eth_portfolio/_stableish.cp313-win32.pyd +0 -0
- eth_portfolio/_stableish.py +42 -0
- eth_portfolio/_submodules.py +73 -0
- eth_portfolio/_utils.py +225 -0
- eth_portfolio/_ydb/__init__.py +0 -0
- eth_portfolio/_ydb/token_transfers.py +145 -0
- eth_portfolio/address.py +397 -0
- eth_portfolio/buckets.py +194 -0
- eth_portfolio/constants.cp313-win32.pyd +0 -0
- eth_portfolio/constants.py +82 -0
- eth_portfolio/portfolio.py +661 -0
- eth_portfolio/protocols/__init__.py +67 -0
- eth_portfolio/protocols/_base.py +108 -0
- eth_portfolio/protocols/convex.py +17 -0
- eth_portfolio/protocols/dsr.py +51 -0
- eth_portfolio/protocols/lending/README.md +6 -0
- eth_portfolio/protocols/lending/__init__.py +50 -0
- eth_portfolio/protocols/lending/_base.py +57 -0
- eth_portfolio/protocols/lending/compound.py +187 -0
- eth_portfolio/protocols/lending/liquity.py +110 -0
- eth_portfolio/protocols/lending/maker.py +104 -0
- eth_portfolio/protocols/lending/unit.py +46 -0
- eth_portfolio/protocols/liquity.py +16 -0
- eth_portfolio/py.typed +0 -0
- eth_portfolio/structs/__init__.py +43 -0
- eth_portfolio/structs/modified.py +69 -0
- eth_portfolio/structs/structs.py +637 -0
- eth_portfolio/typing/__init__.py +1447 -0
- eth_portfolio/typing/balance/single.py +176 -0
- eth_portfolio__mypyc.cp313-win32.pyd +0 -0
- eth_portfolio_scripts/__init__.py +20 -0
- eth_portfolio_scripts/_args.py +26 -0
- eth_portfolio_scripts/_logging.py +15 -0
- eth_portfolio_scripts/_portfolio.py +194 -0
- eth_portfolio_scripts/_utils.py +106 -0
- eth_portfolio_scripts/balances.cp313-win32.pyd +0 -0
- eth_portfolio_scripts/balances.py +52 -0
- eth_portfolio_scripts/docker/.grafana/dashboards/Portfolio/Balances.json +1962 -0
- eth_portfolio_scripts/docker/.grafana/dashboards/dashboards.yaml +10 -0
- eth_portfolio_scripts/docker/.grafana/datasources/datasources.yml +11 -0
- eth_portfolio_scripts/docker/__init__.cp313-win32.pyd +0 -0
- eth_portfolio_scripts/docker/__init__.py +16 -0
- eth_portfolio_scripts/docker/check.cp313-win32.pyd +0 -0
- eth_portfolio_scripts/docker/check.py +56 -0
- eth_portfolio_scripts/docker/docker-compose.yaml +61 -0
- eth_portfolio_scripts/docker/docker_compose.cp313-win32.pyd +0 -0
- eth_portfolio_scripts/docker/docker_compose.py +78 -0
- eth_portfolio_scripts/main.py +119 -0
- eth_portfolio_scripts/py.typed +1 -0
- eth_portfolio_scripts/victoria/__init__.py +73 -0
- eth_portfolio_scripts/victoria/types.py +38 -0
- eth_portfolio_temp-0.2.12.dist-info/METADATA +25 -0
- eth_portfolio_temp-0.2.12.dist-info/RECORD +83 -0
- eth_portfolio_temp-0.2.12.dist-info/WHEEL +5 -0
- eth_portfolio_temp-0.2.12.dist-info/entry_points.txt +2 -0
- eth_portfolio_temp-0.2.12.dist-info/top_level.txt +3 -0
|
@@ -0,0 +1,938 @@
|
|
|
1
|
+
"""
|
|
2
|
+
This module defines the :class:`~eth_portfolio.AddressLedgerBase`, :class:`~eth_portfolio.TransactionsList`,
|
|
3
|
+
:class:`~eth_portfolio.AddressTransactionsLedger`, :class:`~eth_portfolio.InternalTransfersList`,
|
|
4
|
+
:class:`~eth_portfolio.AddressInternalTransfersLedger`, :class:`~eth_portfolio.TokenTransfersList`,
|
|
5
|
+
and :class:`~eth_portfolio.AddressTokenTransfersLedger` classes. These classes manage and interact with ledger entries
|
|
6
|
+
such as transactions, internal transfers, and token transfers associated with Ethereum addresses within the `eth-portfolio` system.
|
|
7
|
+
|
|
8
|
+
These classes leverage the `a_sync` library to support both synchronous and asynchronous operations, allowing efficient data gathering
|
|
9
|
+
and processing without blocking, thus improving the overall responsiveness and performance of portfolio operations.
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
from abc import ABCMeta, abstractmethod
|
|
13
|
+
from asyncio import Lock, Queue, create_task, gather, sleep
|
|
14
|
+
from collections import defaultdict
|
|
15
|
+
from functools import partial
|
|
16
|
+
from http import HTTPStatus
|
|
17
|
+
from itertools import product
|
|
18
|
+
from logging import getLogger
|
|
19
|
+
from typing import (
|
|
20
|
+
TYPE_CHECKING,
|
|
21
|
+
AsyncGenerator,
|
|
22
|
+
AsyncIterator,
|
|
23
|
+
Callable,
|
|
24
|
+
Final,
|
|
25
|
+
Generic,
|
|
26
|
+
List,
|
|
27
|
+
NoReturn,
|
|
28
|
+
Optional,
|
|
29
|
+
Tuple,
|
|
30
|
+
Type,
|
|
31
|
+
TypeVar,
|
|
32
|
+
Union,
|
|
33
|
+
)
|
|
34
|
+
|
|
35
|
+
import a_sync
|
|
36
|
+
import dank_mids
|
|
37
|
+
import eth_retry
|
|
38
|
+
from a_sync.asyncio import sleep0 as yield_to_loop
|
|
39
|
+
from aiohttp import ClientResponseError
|
|
40
|
+
from brownie import chain
|
|
41
|
+
from dank_mids.eth import TraceFilterParams
|
|
42
|
+
from eth_typing import BlockNumber, ChecksumAddress
|
|
43
|
+
from evmspec import FilterTrace
|
|
44
|
+
from evmspec.structs.receipt import Status
|
|
45
|
+
from evmspec.structs.trace import call, reward
|
|
46
|
+
from faster_async_lru import alru_cache
|
|
47
|
+
from typing_extensions import Unpack
|
|
48
|
+
from pandas import DataFrame # type: ignore
|
|
49
|
+
from tqdm import tqdm
|
|
50
|
+
from y import ERC20, Network
|
|
51
|
+
from y._decorators import stuck_coro_debugger
|
|
52
|
+
from y.datatypes import Block
|
|
53
|
+
from y.utils.events import BATCH_SIZE
|
|
54
|
+
|
|
55
|
+
from eth_portfolio import _exceptions, _loaders
|
|
56
|
+
from eth_portfolio._cache import cache_to_disk
|
|
57
|
+
from eth_portfolio._decorators import set_end_block_if_none
|
|
58
|
+
from eth_portfolio._loaders.transaction import get_nonce_at_block, load_transaction
|
|
59
|
+
from eth_portfolio._utils import PandableList, _AiterMixin, get_buffered_chain_height
|
|
60
|
+
from eth_portfolio._ydb.token_transfers import TokenTransfers
|
|
61
|
+
from eth_portfolio.structs import InternalTransfer, TokenTransfer, Transaction
|
|
62
|
+
|
|
63
|
+
if TYPE_CHECKING:
|
|
64
|
+
from eth_portfolio.address import PortfolioAddress
|
|
65
|
+
|
|
66
|
+
logger = getLogger(__name__)


# Generic type of the entries a ledger yields (Transaction, InternalTransfer, ...).
T = TypeVar("T")

# Constrained TypeVar: the concrete list subclass a ledger stores its entries in.
_LedgerEntryList = TypeVar(
    "_LedgerEntryList", "TransactionsList", "InternalTransfersList", "TokenTransfersList"
)
# Union counterpart of _LedgerEntryList for use in non-generic annotations.
PandableLedgerEntryList = Union["TransactionsList", "InternalTransfersList", "TokenTransfersList"]
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
class AddressLedgerBase(
    a_sync.ASyncGenericBase, _AiterMixin[T], Generic[_LedgerEntryList, T], metaclass=ABCMeta
):
    """
    Abstract base class for address ledgers in the eth-portfolio system.

    A ledger lazily loads entries (transactions, internal transfers, or token
    transfers) for a single address. Loaded entries may be kept in an in-memory
    cache (``objects``) whose coverage is tracked by ``cached_from`` /
    ``cached_thru``. Subclasses implement :meth:`_load_new_objects` to fetch
    entries from the chain.
    """

    __slots__ = (
        "address",
        "asynchronous",
        "cached_from",
        "cached_thru",
        "load_prices",
        "objects",
        "portfolio_address",
        "_lock",
    )

    def __init__(self, portfolio_address: "PortfolioAddress") -> None:
        """
        Initializes the AddressLedgerBase instance.

        Args:
            portfolio_address: The :class:`~eth_portfolio.address.PortfolioAddress` this ledger belongs to.
        """

        # TODO replace the following line with an abc implementation.
        # assert isinstance(portfolio_address, PortfolioAddress), f"address must be a PortfolioAddress. try passing in PortfolioAddress({portfolio_address}) instead."

        super().__init__()

        self.portfolio_address = portfolio_address
        """
        The portfolio address this ledger belongs to.
        """

        self.address: Final = self.portfolio_address.address
        """
        The Ethereum address being managed.
        """

        self.asynchronous: Final = self.portfolio_address.asynchronous
        """
        Flag indicating if the operations are asynchronous.
        """

        self.load_prices: Final = self.portfolio_address.load_prices
        """
        Indicates if price loading is enabled.
        """

        self.objects: Final[_LedgerEntryList] = self._list_type()
        """
        _LedgerEntryList: List of ledger entries.
        """

        # NOTE: The following two properties will both be ints once the cache has contents
        self.cached_from: int = None  # type: ignore
        """
        The block from which all entries for this ledger have been loaded into memory.
        """

        self.cached_thru: int = None  # type: ignore
        """
        The block through which all entries for this ledger have been loaded into memory.
        """

        self._lock: Final = Lock()
        """
        Lock: Lock for synchronizing access to ledger entries.
        """

    def __hash__(self) -> int:
        """
        Returns the hash of the address.

        Returns:
            The hash value.
        """
        return hash(self.address)

    def __repr__(self) -> str:
        return f"<{type(self).__name__} for {self.address} at {hex(id(self))}>"

    @property
    @abstractmethod
    def _list_type(self) -> Type[_LedgerEntryList]:
        """
        Type of list used to store ledger entries.
        """
        ...

    @property
    def _start_block(self) -> int:
        """
        Returns the starting block for the portfolio address.

        Returns:
            The starting block number.
        """
        return self.portfolio_address._start_block

    async def _get_and_yield(
        self, start_block: Block, end_block: Block, mem_cache: bool
    ) -> AsyncGenerator[T, None]:
        """
        Yields ledger entries between the specified blocks.

        Args:
            start_block: The starting block number.
            end_block: The ending block number.
            mem_cache: If False, entries are streamed straight from
                :meth:`_get_new_objects` without consulting the in-memory cache.

        Yields:
            AsyncGenerator[T, None]: An async generator of ledger entries.
        """
        num_yielded = 0

        async def unblock_loop() -> None:
            """
            Let the event loop run at least once for every 500
            objects yielded so it doesn't get too congested.
            """
            nonlocal num_yielded
            num_yielded += 1
            if num_yielded % 500 == 0:
                await yield_to_loop()

        if not mem_cache:
            async for ledger_entry in self._get_new_objects(start_block, end_block, False):
                yield ledger_entry
                await unblock_loop()
            return

        # Fast path: the cache already extends past `end_block`, so every entry we
        # need is in `self.objects` (kept sorted by block) — serve from memory.
        if self.objects and end_block and self.objects[-1].block_number > end_block:
            for ledger_entry in self.objects:
                block = ledger_entry.block_number
                if block < start_block:
                    continue
                elif block > end_block:
                    return
                yield ledger_entry
                await unblock_loop()

        # Slow path: yield what is already cached, fetch new entries, then sweep
        # the cache once more for anything inserted while fetching. `yielded`
        # guards against emitting the same entry twice.
        yielded = set()
        for ledger_entry in self.objects:
            block = ledger_entry.block_number
            if block < start_block:
                continue
            elif end_block and block > end_block:
                break
            yield ledger_entry
            yielded.add(ledger_entry)
            await unblock_loop()
        async for ledger_entry in self._get_new_objects(start_block, end_block, True):  # type: ignore [assignment, misc]
            if ledger_entry not in yielded:
                yield ledger_entry
                yielded.add(ledger_entry)
                await unblock_loop()
        for ledger_entry in self.objects:
            block = ledger_entry.block_number
            if block < start_block:
                continue
            elif end_block and block > end_block:
                break
            if ledger_entry not in yielded:
                yield ledger_entry
                yielded.add(ledger_entry)
                await unblock_loop()

    @set_end_block_if_none
    @stuck_coro_debugger
    async def get(self, start_block: Block, end_block: Block) -> _LedgerEntryList:
        """
        Retrieves ledger entries between the specified blocks.

        Args:
            start_block: The starting block number.
            end_block: The ending block number.

        Returns:
            _LedgerEntryList: The list of ledger entries.

        Examples:
            >>> entries = await ledger.get(12000000, 12345678)
        """
        return self._list_type([ledger_entry async for ledger_entry in self[start_block:end_block]])

    @stuck_coro_debugger
    async def new(self) -> _LedgerEntryList:
        """
        Retrieves new ledger entries since the last cached block.

        Returns:
            _LedgerEntryList: The list of new ledger entries.

        Examples:
            >>> new_entries = await ledger.new()
        """
        start_block = 0 if self.cached_thru is None else self.cached_thru + 1
        end_block = await get_buffered_chain_height()
        # NOTE(review): tuple indexing (not a slice) — presumably
        # _AiterMixin.__getitem__ accepts a (start, end) tuple as well as a
        # slice; confirm before changing this to `self[start_block:end_block]`.
        return self[start_block, end_block]  # type: ignore [index, return-value]

    async def sent(
        self, start_block: Optional[Block] = None, end_block: Optional[Block] = None
    ) -> AsyncIterator[T]:
        """Yield only the entries sent FROM this ledger's address."""
        address = self.portfolio_address.address
        async for obj in self[start_block:end_block]:
            if obj.from_address == address:
                yield obj

    async def received(
        self, start_block: Optional[Block] = None, end_block: Optional[Block] = None
    ) -> AsyncIterator[T]:
        """Yield only the entries NOT sent from this ledger's address (i.e. received)."""
        address = self.portfolio_address.address
        async for obj in self[start_block:end_block]:
            if obj.from_address != address:
                yield obj

    @stuck_coro_debugger
    @set_end_block_if_none
    async def _get_new_objects(
        self, start_block: Block, end_block: Block, mem_cache: bool
    ) -> AsyncIterator[T]:
        """
        Retrieves new ledger entries between the specified blocks.

        The lock serializes loads so concurrent callers cannot both populate
        the cache for overlapping ranges.

        Args:
            start_block: The starting block number.
            end_block: The ending block number.

        Yields:
            AsyncIterator[T]: An async iterator of new ledger entries.
        """
        async with self._lock:
            async for ledger_entry in self._load_new_objects(start_block, end_block, mem_cache):
                yield ledger_entry

    @abstractmethod
    async def _load_new_objects(
        self, start_block: Block, end_block: Block, mem_cache: bool
    ) -> AsyncIterator[T]:
        """
        Abstract method to load new ledger entries between the specified blocks.

        Args:
            start_block: The starting block number.
            end_block: The ending block number.

        Yields:
            AsyncIterator[T]: An async iterator of new ledger entries.
        """
        yield  # type: ignore [misc]

    def _check_blocks_against_cache(
        self, start_block: Block, end_block: Block
    ) -> Tuple[Block, Block]:
        """
        Checks the specified block range against the cached block range.

        Args:
            start_block: The starting block number.
            end_block: The ending block number.

        Returns:
            Tuple: The (possibly narrowed) block range that still needs to be loaded.

        Raises:
            ValueError: If the start block is after the end block.
            _exceptions.BlockRangeIsCached: If the block range is already cached.
            _exceptions.BlockRangeOutOfBounds: If the block range extends beyond both ends of the cache.
        """
        if start_block > end_block:
            raise ValueError(f"Start block {start_block} is after end block {end_block}")

        # There is no cache
        elif self.cached_from is None or self.cached_thru is None:
            return start_block, end_block

        # Range is cached
        elif start_block >= self.cached_from and end_block <= self.cached_thru:
            raise _exceptions.BlockRangeIsCached()

        # Beginning of range is cached
        # NOTE: `<=` (not `<`): a range starting exactly at `cached_thru` is
        # still partially cached; the previous strict comparison let that
        # boundary case fall through to the NotImplementedError below.
        elif (
            start_block >= self.cached_from
            and start_block <= self.cached_thru
            and end_block > self.cached_thru
        ):
            return self.cached_thru + 1, end_block

        # End of range is cached
        # NOTE: `<=` (not `<`) for the same boundary reason as above.
        elif (
            start_block < self.cached_from
            and end_block >= self.cached_from
            and end_block <= self.cached_thru
        ):
            return start_block, self.cached_from - 1

        # Beginning and end both outside bounds of cache to high side
        elif start_block > self.cached_thru:
            return self.cached_thru + 1, end_block

        # Beginning and end both outside bounds of cache to low side
        elif end_block < self.cached_from:
            return start_block, self.cached_from - 1

        # Beginning and end both outside bounds of cache, split
        elif start_block < self.cached_from and end_block > self.cached_thru:
            raise _exceptions.BlockRangeOutOfBounds(start_block, end_block, self)

        raise NotImplementedError(
            f"This is a work in progress and we still need code for this specific case. Feel free to create an issue on our github if you need this.\n\nstart_block: {start_block} end_block: {end_block} cached_from: {self.cached_from} cached_thru: {self.cached_thru}"
        )
|
|
390
|
+
|
|
391
|
+
|
|
392
|
+
class TransactionsList(PandableList[Transaction]):
    """
    A list subclass for transactions that can convert to a :class:`DataFrame`.
    """

    def _df(self) -> DataFrame:
        """
        Converts the list of transactions to a DataFrame.

        Integer-like columns (chainId, blockNumber, transactionIndex, nonce,
        gas, gasPrice) are normalized to ``int``.

        Returns:
            DataFrame: The transactions as a DataFrame.
        """
        df = DataFrame(self)
        if len(df) > 0:
            # These columns arrive as int-convertible wrapper values; cast each
            # one in a single loop instead of six copy-pasted statements.
            for column in ("chainId", "blockNumber", "transactionIndex", "nonce", "gas", "gasPrice"):
                df[column] = df[column].apply(int)
        return df
|
|
413
|
+
|
|
414
|
+
|
|
415
|
+
# Type alias: an account nonce (0-based transaction counter) — the work-queue
# item type used by AddressTransactionsLedger's workers.
Nonce = int
|
|
416
|
+
|
|
417
|
+
|
|
418
|
+
class AddressTransactionsLedger(AddressLedgerBase[TransactionsList, Transaction]):
    """
    A ledger for managing transaction entries.

    Transactions are discovered by nonce rather than by block: the ledger
    compares the highest nonce already loaded (``cached_thru_nonce``) against
    the account nonce at ``end_block`` and fans the missing nonces out to a
    pool of worker tasks, each resolving one nonce to a transaction.
    """

    _list_type = TransactionsList
    __slots__ = ("cached_thru_nonce", "_queue", "_ready", "_num_workers", "_workers")

    def __init__(self, portfolio_address: "PortfolioAddress", num_workers: int = 1000):
        """
        Initializes the AddressTransactionsLedger instance.

        Args:
            portfolio_address: The :class:`~eth_portfolio.address.PortfolioAddress` this ledger belongs to.
            num_workers: Upper bound on concurrent nonce-loading worker tasks.
        """
        super().__init__(portfolio_address)
        self.cached_thru_nonce = -1
        """
        The nonce through which all transactions have been loaded into memory.
        """
        # Nonces waiting to be resolved by the workers.
        self._queue = Queue()
        # (nonce, transaction | None | Exception) results produced by the workers.
        self._ready = Queue()
        self._num_workers = num_workers
        # Lazily-spawned worker tasks; see _ensure_workers / __stop_workers.
        self._workers = []

    def __del__(self) -> None:
        # Cancel any still-running worker tasks when the ledger is collected.
        self.__stop_workers()

    @stuck_coro_debugger
    @set_end_block_if_none
    async def _load_new_objects(self, _: Block, end_block: Block, mem_cache: bool) -> AsyncIterator[Transaction]:  # type: ignore [override]
        """
        Loads new transaction entries between the specified blocks.

        Args:
            _: The starting block number (unused; loading is driven by nonce, not block).
            end_block: The ending block number.
            mem_cache: Accepted for interface compatibility; not yet implemented here.

        Yields:
            AsyncIterator[Transaction]: An async iterator of transaction entries.
        """
        # Nothing to do if the cache already covers the requested range.
        if self.cached_thru and end_block < self.cached_thru:
            return
        if not mem_cache:
            logger.warning(
                f"{type(self).__name__}._load_new_objects mem_cache arg is not yet implemented"
            )
        address = self.address
        end_block_nonce: int = await get_nonce_at_block(address, end_block)
        if nonces := tuple(range(self.cached_thru_nonce + 1, end_block_nonce + 1)):
            for i, nonce in enumerate(nonces):
                self._queue.put_nowait(nonce)

                # Keep the event loop relatively unblocked
                # and let the rpc start doing work asap
                # NOTE(review): `if i % 1000:` is truthy for every i EXCEPT
                # multiples of 1000, so this yields on almost every iteration.
                # `if i % 1000 == 0:` (once per 1000, as in
                # AddressLedgerBase._get_and_yield's helper) looks like the
                # intent — confirm before changing.
                if i % 1000:
                    await yield_to_loop()

            len_nonces = len(nonces)
            del nonces  # free the tuple; only the count is needed from here on

            self._ensure_workers(min(len_nonces, self._num_workers))

            transactions = []
            transaction: Optional[Transaction]
            # Consume exactly one result per enqueued nonce.
            for _ in tqdm(range(len_nonces), desc=f"Transactions {address}"):
                nonce, transaction = await self._ready.get()
                if transaction:
                    # Workers funnel their own failures through the ready queue.
                    if isinstance(transaction, Exception):
                        raise transaction
                    transactions.append(transaction)
                    yield transaction
                elif nonce == 0 and self.cached_thru_nonce == -1:
                    # Gnosis safes
                    self.cached_thru_nonce = 0
                else:
                    # NOTE Are we sure this is the correct way to handle this scenario? Are we sure it will ever even occur with the new gnosis handling?
                    logger.warning("No transaction with nonce %s for %s", nonce, address)

            self.__stop_workers()

            if transactions:
                self.objects.extend(transactions)
            if self.objects:
                # Keep the in-memory cache ordered by nonce.
                self.objects.sort(key=lambda t: t.nonce)
                self.cached_thru_nonce = self.objects[-1].nonce

        if self.cached_from is None:
            self.cached_from = 0
        if self.cached_thru is None or end_block > self.cached_thru:
            self.cached_thru = end_block

    def _ensure_workers(self, num_workers: int) -> None:
        # Spawn additional worker tasks until at least `num_workers` exist.
        len_workers = len(self._workers)
        if len_workers < num_workers:
            worker_fn = self.__worker_fn
            address = self.address
            load_prices = self.load_prices
            # Bind the hot queue methods to locals once; every worker reuses them.
            queue_get = stuck_coro_debugger(self._queue.get)
            put_ready = self._ready.put_nowait

            self._workers.extend(
                create_task(
                    coro=worker_fn(address, load_prices, queue_get, put_ready),
                    name=f"AddressTransactionsLedger worker {i} for {address}",
                )
                for i in range(num_workers - len_workers)
            )

    async def __worker_fn(
        self,
        address: ChecksumAddress,
        load_prices: bool,
        queue_get: Callable[[], Nonce],
        put_ready: Callable[[Tuple[Nonce, Optional[Transaction]]], None],
    ) -> NoReturn:
        # Worker loop: pull a nonce off the work queue, load its transaction,
        # and push the result onto the ready queue. Per-nonce failures are
        # forwarded as a (nonce, exception) pair so the consumer can re-raise;
        # the loop itself only exits via cancellation from __stop_workers.
        try:
            while True:
                nonce = await queue_get()
                try:
                    # load_transaction's return is pushed as-is; the consumer
                    # unpacks it as a (nonce, transaction) pair.
                    put_ready(await load_transaction(address, nonce, load_prices))
                except Exception as e:
                    put_ready((nonce, e))
        except Exception as e:
            logger.error("%s in %s __worker_coro", type(e), self)
            logger.exception(e)
            raise

    def __stop_workers(self) -> None:
        # Cancel and discard every outstanding worker task.
        logger.debug("stopping workers for %s", self)
        workers = self._workers
        pop_next = workers.pop
        for _ in range(len(workers)):
            pop_next().cancel()
|
|
552
|
+
|
|
553
|
+
|
|
554
|
+
class InternalTransfersList(PandableList[InternalTransfer]):
    """
    A list subclass for internal transfer entries that can convert to a :class:`DataFrame`.

    No members are overridden here; the inherited :class:`PandableList`
    behavior is used as-is.
    """
|
|
558
|
+
|
|
559
|
+
|
|
560
|
+
@a_sync.Semaphore(128, __name__ + ".trace_filter")
@stuck_coro_debugger
@eth_retry.auto_retry
async def trace_filter(
    from_block: BlockNumber,
    to_block: BlockNumber,
    params: TraceFilterParams,
) -> List[FilterTrace]:
    """
    Fetch ``trace_filter`` results for the given block range.

    This is a thin public wrapper around the module-private ``__trace_filter``;
    the semaphore decorator caps in-flight calls at 128 and ``eth_retry``
    handles transient RPC failures.

    Args:
        from_block: The first block of the range.
        to_block: The last block of the range.
        params: Additional trace filter parameters.

    Returns:
        The list of traces returned by the node.
    """
    return await __trace_filter(from_block, to_block, params)
|
|
569
|
+
|
|
570
|
+
|
|
571
|
+
async def __trace_filter(
    from_block: BlockNumber,
    to_block: BlockNumber,
    params: TraceFilterParams,
) -> List[FilterTrace]:
    """
    Fetch traces for the inclusive range ``[from_block, to_block]``, bisecting
    the range and retrying when the node returns a recoverable error.

    Args:
        from_block: The first block of the range.
        to_block: The last block of the range.
        params: Additional trace filter parameters.

    Returns:
        The combined list of traces for the range.

    Raises:
        ClientResponseError: For non-503 HTTP errors, or a 503 on a
            single-block range (which cannot be split further).
        TypeError: For any TypeError other than the known intermittent
            dank_mids issue.
    """
    try:
        return await dank_mids.eth.trace_filter(
            {"fromBlock": from_block, "toBlock": to_block, **params}
        )
    except ClientResponseError as e:
        # A 503 on a multi-block range is treated as load-related: fall
        # through and retry each half of the range.
        if e.status != HTTPStatus.SERVICE_UNAVAILABLE or to_block == from_block:
            raise
    except TypeError as e:
        # This is some intermittent error I need to debug in dank_mids, I think it occurs when we get rate limited
        if str(e) != "a bytes-like object is required, not 'NoneType'":
            raise
        await sleep(0.5)
        # remove this logger when I know there are no looping issues
        logger.info("call failed, trying again")

    # Bisect the range and recurse on each half.
    # NOTE: the midpoint must make both halves strictly smaller than the
    # original range. The previous `from_block + (range_size // 2)` equaled
    # `to_block` for a 2-block range, so the first recursive call repeated the
    # original (from_block, to_block) range and recursed forever.
    halfway = (from_block + to_block) // 2
    results = await gather(
        __trace_filter(from_block, BlockNumber(halfway), params),
        __trace_filter(BlockNumber(halfway + 1), to_block, params),
    )
    return results[0] + results[1]
|
|
600
|
+
|
|
601
|
+
|
|
602
|
+
@alru_cache(maxsize=None)
@eth_retry.auto_retry(min_sleep_time=1, max_sleep_time=3, max_retries=20, suppress_logs=1)
async def get_transaction_status(txhash: str) -> Status:
    """
    Retrieves the status for a transaction.

    This function is cached in memory (unbounded ``alru_cache``) to reduce
    resource usage; results are not persisted to disk.

    Args:
        txhash: The hash of the transaction.

    Returns:
        The status of the transaction.
    """
    return await dank_mids.eth.get_transaction_status(txhash)
|
|
617
|
+
|
|
618
|
+
|
|
619
|
+
# Semaphores keyed by (toAddress, fromAddress) tuples; each caps concurrent
# trace_filter range queries for that address pair at 4 (see get_traces).
_trace_semaphores = defaultdict(lambda: a_sync.Semaphore(4, __name__ + ".trace_semaphore"))
|
|
620
|
+
|
|
621
|
+
|
|
622
|
+
@cache_to_disk
@eth_retry.auto_retry
async def get_traces(
    from_block: BlockNumber,
    to_block: BlockNumber,
    filter_params: TraceFilterParams,
) -> List[FilterTrace]:
    """
    Retrieves traces from the web3 provider using the given parameters.

    This function is cached to disk to reduce resource usage.

    Args:
        from_block: The first block of the range.
        to_block: The last block of the range.
        filter_params: The parameters for the trace filter.

    Returns:
        The list of traces.
    """
    if chain.id == Network.Polygon:
        logger.warning(
            "polygon doesnt support trace_filter method, must develop alternate solution"
        )
        return []
    # One semaphore per (toAddress, fromAddress) pair so unrelated address
    # queries don't throttle each other; missing keys default to ("",).
    semaphore_key = (
        tuple(filter_params.get("toAddress", ("",))),
        tuple(filter_params.get("fromAddress", ("",))),
    )
    async with _trace_semaphores[semaphore_key]:
        traces = await trace_filter(from_block, to_block, filter_params)
        return await _check_traces(traces) if traces else []
|
|
652
|
+
|
|
653
|
+
|
|
654
|
+
@stuck_coro_debugger
@eth_retry.auto_retry
async def _check_traces(traces: List[FilterTrace]) -> List[FilterTrace]:
    """
    Filter raw traces down to the ones worth keeping.

    Drops error traces and call traces into the Gnosis Safe singleton, then
    keeps only traces whose parent transaction succeeded (block-reward traces
    have no parent transaction and are kept unconditionally). Status lookups
    are scheduled during the first pass via the TaskMapping so they resolve
    concurrently before the final list build awaits them.
    """
    good_traces = []
    append = good_traces.append

    check_status_tasks = a_sync.TaskMapping(get_transaction_status)

    for i, trace in enumerate(traces):
        # Make sure we don't block up the event loop
        # NOTE(review): `if i % 500:` yields on every iteration except
        # multiples of 500; `== 0` (as in AddressLedgerBase._get_and_yield)
        # looks like the intent — confirm before changing.
        if i % 500:
            await yield_to_loop()

        if "error" in trace:
            continue

        # NOTE: Not sure why these appear, but I've yet to come across an internal transfer
        # that actually transmitted value to the singleton even though they appear to.
        if (
            isinstance(trace, call.Trace)
            and trace.action.to == "0xd9Db270c1B5E3Bd161E8c8503c55cEABeE709552"
        ):  # Gnosis Safe Singleton 1.3.0
            continue

        if not isinstance(trace, reward.Trace):
            # NOTE: We don't need to confirm block rewards came from a successful transaction, because they don't come from a transaction
            # Accessing the mapping schedules the status lookup without awaiting it.
            check_status_tasks[trace.transactionHash]

        append(trace)

    # NOTE: We don't need to confirm block rewards came from a successful transaction, because they don't come from a transaction
    return [
        trace
        for trace in good_traces
        if isinstance(trace, reward.Trace)
        or await check_status_tasks[trace.transactionHash] == Status.success
    ]
|
|
691
|
+
|
|
692
|
+
|
|
693
|
+
# Inclusive (start, end) block range as produced by _get_block_ranges.
BlockRange = Tuple[Block, Block]
|
|
694
|
+
|
|
695
|
+
|
|
696
|
+
def _get_block_ranges(start_block: Block, end_block: Block) -> List[BlockRange]:
|
|
697
|
+
return [(i, i + BATCH_SIZE - 1) for i in range(start_block, end_block, BATCH_SIZE)]
|
|
698
|
+
|
|
699
|
+
|
|
700
|
+
class AddressInternalTransfersLedger(AddressLedgerBase[InternalTransfersList, InternalTransfer]):
    """
    A ledger for managing internal transfer entries.

    Internal transfers are discovered via ``trace_filter``-style queries
    (:func:`get_traces`) in both directions (to/from :attr:`address`) and may
    be memory-cached on :attr:`objects`.
    """

    # Container type AddressLedgerBase uses when materializing self.objects.
    _list_type = InternalTransfersList

    @stuck_coro_debugger
    @set_end_block_if_none
    async def _load_new_objects(
        self, start_block: Block, end_block: Block, mem_cache: bool
    ) -> AsyncIterator[InternalTransfer]:
        """
        Loads new internal transfer entries between the specified blocks.

        Args:
            start_block: The starting block number.
            end_block: The ending block number.
            mem_cache: If True, consult and update the in-memory cache
                (:attr:`objects`, :attr:`cached_from`, :attr:`cached_thru`)
                in addition to yielding each transfer.

        Yields:
            AsyncIterator[InternalTransfer]: An async iterator of internal transfer entries.
        """
        # The genesis block has no traces to filter; start from block 1.
        if start_block == 0:
            start_block = 1

        if mem_cache:
            try:
                # Narrow the requested range to the portion not already cached.
                start_block, end_block = self._check_blocks_against_cache(start_block, end_block)
            except _exceptions.BlockRangeIsCached:
                # Entire range already in memory; nothing to load.
                return
            except _exceptions.BlockRangeOutOfBounds as e:
                # The exception object knows how to load the missing sub-ranges.
                await e.load_remaining()
                return

        # TODO: figure out where this float comes from and raise a TypeError there
        if isinstance(start_block, float) and int(start_block) == start_block:
            start_block = int(start_block)
        if isinstance(end_block, float) and int(end_block) == end_block:
            end_block = int(end_block)

        address = self.address
        if start_block == end_block:
            # Single-block query: one trace filter per direction.
            trace_filter_coros = [
                get_traces(start_block, end_block, {"toAddress": [address]}),
                get_traces(start_block, end_block, {"fromAddress": [address]}),
            ]
        else:
            # Chunk the range and query each chunk in both directions.
            block_ranges = _get_block_ranges(start_block, end_block)
            addr_filters = {"toAddress": [address]}, {"fromAddress": [address]}
            trace_filter_coros = [
                get_traces(start, end, addr_filter)
                for (start, end), addr_filter in product(block_ranges, addr_filters)
            ]

        # NOTE: We only want tqdm progress bar when there is work to do
        if len(trace_filter_coros) < 10:
            generator_function = a_sync.as_completed
        else:
            generator_function = partial(  # type: ignore [assignment]
                a_sync.as_completed, tqdm=True, desc=f"Trace Filters {address}"
            )

        load = InternalTransfer.from_trace

        if mem_cache:
            internal_transfers = []
            append_transfer = internal_transfers.append

        done = 0
        if self.load_prices:
            # Price loading is expensive, so collect all traces first and then
            # price them concurrently as tasks.
            traces = []
            async for chunk in generator_function(trace_filter_coros, aiter=True):
                traces.extend(chunk)

            if traces:
                tasks = []
                while traces:
                    # Create the decode/price tasks in batches of 5000.
                    tasks.extend(
                        create_task(load(trace, load_prices=True)) for trace in traces[:5000]
                    )
                    traces = traces[5000:]
                    # let the tasks start sending calls to your node now
                    # without waiting for all tasks to be created
                    await yield_to_loop()

                async for internal_transfer in a_sync.as_completed(
                    tasks, aiter=True, tqdm=True, desc=f"Internal Transfers {address}"
                ):
                    # from_trace may resolve to None for traces we don't track.
                    if internal_transfer is not None:
                        if mem_cache:
                            append_transfer(internal_transfer)
                        yield internal_transfer

                    done += 1
                    if done % 1000 == 0:
                        # Periodically yield control so other coroutines can run.
                        await yield_to_loop()

        else:
            async for chunk in generator_function(trace_filter_coros, aiter=True):
                for trace in chunk:
                    internal_transfer = await load(trace, load_prices=False)
                    if internal_transfer is not None:
                        if mem_cache:
                            append_transfer(internal_transfer)
                        yield internal_transfer

                    done += 1
                    if done % 1000 == 0:
                        # Periodically yield control so other coroutines can run.
                        await yield_to_loop()

        if mem_cache and internal_transfers:
            self.objects.extend(internal_transfers)
            # Keep the in-memory cache in chronological order.
            self.objects.sort(key=lambda t: (t.block_number, t.transaction_index))

        # NOTE(review): these cache markers appear to be updated even when
        # mem_cache is False — confirm that is intended, since
        # _check_blocks_against_cache consults them on subsequent cached calls.
        if self.cached_from is None or start_block < self.cached_from:
            self.cached_from = start_block
        if self.cached_thru is None or end_block > self.cached_thru:
            self.cached_thru = end_block
|
|
818
|
+
|
|
819
|
+
|
|
820
|
+
# Bounds how many addresses may enumerate their held tokens concurrently
# (used by AddressTokenTransfersLedger._yield_tokens_at_block), keeping a
# large portfolio from flooding the node with transfer-history queries.
_yield_tokens_semaphore = a_sync.Semaphore(
    10, name="eth_portfolio._ledgers.address._yield_tokens_semaphore"
)
|
|
823
|
+
|
|
824
|
+
|
|
825
|
+
class TokenTransfersList(PandableList[TokenTransfer]):
    """A :class:`PandableList` of :class:`TokenTransfer` entries.

    Behaves exactly like a plain list of token transfers while inheriting
    :class:`PandableList`'s ability to export its contents as a
    :class:`DataFrame`.
    """
|
|
829
|
+
|
|
830
|
+
|
|
831
|
+
class AddressTokenTransfersLedger(AddressLedgerBase[TokenTransfersList, TokenTransfer]):
    """
    A ledger for managing token transfer entries.

    Transfer discovery and decoding is delegated to the :class:`TokenTransfers`
    helper held on ``self._transfers``; this class layers the shared ledger
    caching behavior on top of it.
    """

    # Container type AddressLedgerBase uses when materializing self.objects.
    _list_type = TokenTransfersList
    __slots__ = ("_transfers",)

    def __init__(self, portfolio_address: "PortfolioAddress"):
        """
        Initializes the AddressTokenTransfersLedger instance.

        Args:
            portfolio_address: The :class:`~eth_portfolio.address.PortfolioAddress` this ledger belongs to.
        """
        super().__init__(portfolio_address)
        self._transfers = TokenTransfers(
            self.address, self.portfolio_address._start_block, load_prices=self.load_prices
        )
        """
        TokenTransfers: Instance for handling token transfer operations.
        """

    @stuck_coro_debugger
    async def list_tokens_at_block(self, block: Optional[int] = None) -> List[ERC20]:
        """
        Lists the tokens held at a specific block.

        Args:
            block (Optional[int], optional): The block number. Defaults to None.

        Returns:
            List[ERC20]: The list of ERC20 tokens.

        Examples:
            >>> tokens = await ledger.list_tokens_at_block(12345678)
        """
        return [token async for token in self._yield_tokens_at_block(block)]

    async def _yield_tokens_at_block(self, block: Optional[int] = None) -> AsyncIterator[ERC20]:
        """
        Yields the tokens held at a specific block.

        Each token address is yielded at most once, in the order it first
        appears in this ledger's transfer history up through ``block``.

        Args:
            block (Optional[int], optional): The block number. Defaults to None.

        Yields:
            AsyncIterator[ERC20]: An async iterator of ERC20 tokens.
        """
        # Bound how many addresses enumerate their tokens concurrently.
        async with _yield_tokens_semaphore:
            yielded = set()
            # self[:block] iterates this ledger's transfers up through `block`.
            async for transfer in self[:block]:
                address = transfer.token_address
                if address not in yielded:
                    yielded.add(address)
                    yield ERC20(address, asynchronous=self.asynchronous)

    @stuck_coro_debugger
    @set_end_block_if_none
    async def _load_new_objects(self, start_block: Block, end_block: Block, mem_cache: bool) -> AsyncIterator[TokenTransfer]:  # type: ignore [override]
        """
        Loads new token transfer entries between the specified blocks.

        Args:
            start_block: The starting block number.
            end_block: The ending block number.
            mem_cache: If True, consult and update the in-memory cache
                (:attr:`objects`, :attr:`cached_from`, :attr:`cached_thru`)
                in addition to yielding each transfer.

        Yields:
            AsyncIterator[TokenTransfer]: An async iterator of token transfer entries.
        """
        if mem_cache:
            try:
                # Narrow the requested range to the portion not already cached.
                start_block, end_block = self._check_blocks_against_cache(start_block, end_block)
            except _exceptions.BlockRangeIsCached:
                # Entire range already in memory; nothing to load.
                return
            except _exceptions.BlockRangeOutOfBounds as e:
                # The exception object knows how to load the missing sub-ranges.
                await e.load_remaining()
                return

        # Collect the in-range decode tasks scheduled by the helper.
        if tasks := [
            task
            async for task in self._transfers.yield_thru_block(end_block)
            if start_block <= task.block  # type: ignore [attr-defined]
        ]:
            token_transfers = []
            append_token_transfer = token_transfers.append
            done = 0
            async for token_transfer in a_sync.as_completed(
                tasks, aiter=True, tqdm=True, desc=f"Token Transfers {self.address}"
            ):
                # Tasks may resolve to a falsy value for transfers we skip.
                if token_transfer:
                    if mem_cache:
                        append_token_transfer(token_transfer)
                    yield token_transfer

                # Don't let the event loop get congested
                done += 1
                if done % 100 == 0:
                    await yield_to_loop()

            if mem_cache and token_transfers:
                self.objects.extend(token_transfers)
                # Keep the in-memory cache in chronological order.
                self.objects.sort(key=lambda t: (t.block_number, t.transaction_index, t.log_index))

        # NOTE(review): these cache markers appear to be updated even when
        # mem_cache is False — confirm that is intended, since
        # _check_blocks_against_cache consults them on subsequent cached calls.
        if self.cached_from is None or start_block < self.cached_from:
            self.cached_from = start_block
        if self.cached_thru is None or end_block > self.cached_thru:
            self.cached_thru = end_block
|