abstract-block-dumper 0.1.0__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,8 +1,10 @@
+from collections.abc import Iterator
 from datetime import timedelta
 from typing import Any
 
 from django.conf import settings
 from django.db import transaction
+from django.db.models import Max
 from django.db.models.query import QuerySet
 from django.utils import timezone
 
@@ -10,19 +12,21 @@ import abstract_block_dumper._internal.services.utils as abd_utils
 import abstract_block_dumper.models as abd_models
 
 
-def get_ready_to_retry_attempts() -> QuerySet[abd_models.TaskAttempt]:
-    return abd_models.TaskAttempt.objects.filter(
-        next_retry_at__isnull=False,
-        next_retry_at__lte=timezone.now(),
-        attempt_count__lt=abd_utils.get_max_attempt_limit(),
-    ).exclude(
-        status=abd_models.TaskAttempt.Status.SUCCESS,
+def get_ready_to_retry_attempts() -> Iterator[abd_models.TaskAttempt]:
+    return (
+        abd_models.TaskAttempt.objects.filter(
+            next_retry_at__isnull=False,
+            next_retry_at__lte=timezone.now(),
+            attempt_count__lt=abd_utils.get_max_attempt_limit(),
+        )
+        .exclude(
+            status=abd_models.TaskAttempt.Status.SUCCESS,
+        )
+        .iterator()
     )
 
 
 def executed_block_numbers(executable_path: str, args_json: str, from_block: int, to_block: int) -> set[int]:
-    # Use iterator() to avoid Django's QuerySet caching which causes memory leaks
-    # during long-running backfill operations
     block_numbers = (
         abd_models.TaskAttempt.objects.filter(
             executable_path=executable_path,
@@ -151,7 +155,5 @@ def task_create_or_get_pending(
 
 
 def get_the_latest_executed_block_number() -> int | None:
-    qs = abd_models.TaskAttempt.objects.order_by("-block_number").first()
-    if qs:
-        return qs.block_number
-    return None
+    result = abd_models.TaskAttempt.objects.aggregate(max_block=Max("block_number"))
+    return result["max_block"]
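Note on the two behavioural changes above: get_ready_to_retry_attempts() now returns a server-side iterator rather than a QuerySet, and the latest-block lookup is a single MAX() aggregate. A minimal sketch of how a caller sees the difference (illustrative only, assuming a configured Django project with the TaskAttempt model; not part of the package):

    # Illustrative sketch, not package code.
    from django.db.models import Max

    import abstract_block_dumper._internal.dal.django_dal as abd_dal
    import abstract_block_dumper.models as abd_models

    attempts = abd_dal.get_ready_to_retry_attempts()  # iterator: rows are streamed, not cached
    # attempts[:10] would now raise TypeError; see the itertools.islice change further below
    for attempt in attempts:
        print(attempt.block_number)

    # aggregate() returns {"max_block": None} on an empty table, so the function
    # still yields None in that case, matching the old first()/None behaviour.
    latest = abd_models.TaskAttempt.objects.aggregate(max_block=Max("block_number"))["max_block"]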
@@ -0,0 +1,78 @@
+import bittensor as bt
+import structlog
+
+import abstract_block_dumper._internal.services.utils as abd_utils
+
+logger = structlog.get_logger(__name__)
+
+
+# Blocks older than this threshold from current head require archive network
+ARCHIVE_BLOCK_THRESHOLD = 300
+
+
+class BittensorConnectionClient:
+    """
+    Manages connections to regular and archive Bittensor subtensor networks.
+    """
+
+    def __init__(self, network: str) -> None:
+        self.network = network
+        self._subtensor: bt.Subtensor | None = None
+        self._archive_subtensor: bt.Subtensor | None = None
+        self._current_block_cache: int | None = None
+
+    def get_for_block(self, block_number: int) -> bt.Subtensor:
+        """Get the appropriate subtensor client for the given block number."""
+        raise NotImplementedError
+
+    @property
+    def subtensor(self) -> bt.Subtensor:
+        """Get the regular subtensor connection, creating it if needed."""
+        if self._subtensor is None:
+            self._subtensor = abd_utils.get_bittensor_client(self.network)
+        return self._subtensor
+
+    @subtensor.setter
+    def subtensor(self, value: bt.Subtensor | None) -> None:
+        """Set or reset the subtensor connection."""
+        self._subtensor = value
+
+    @property
+    def archive_subtensor(self) -> bt.Subtensor:
+        """Get the archive subtensor connection, creating it if needed."""
+        if self._archive_subtensor is None:
+            self._archive_subtensor = abd_utils.get_bittensor_client("archive")
+        return self._archive_subtensor
+
+    @archive_subtensor.setter
+    def archive_subtensor(self, value: bt.Subtensor | None) -> None:
+        """Set or reset the archive subtensor connection."""
+        self._archive_subtensor = value
+
+    def get_subtensor_for_block(self, block_number: int) -> bt.Subtensor:
+        """
+        Get the appropriate subtensor for the given block number.
+
+        Uses archive network for blocks older than ARCHIVE_BLOCK_THRESHOLD
+        from the current head.
+        """
+        if self._current_block_cache is None:
+            self._current_block_cache = self.subtensor.get_current_block()
+
+        blocks_behind = self._current_block_cache - block_number
+
+        if blocks_behind > ARCHIVE_BLOCK_THRESHOLD:
+            logger.debug(
+                "Using archive network for old block",
+                block_number=block_number,
+                blocks_behind=blocks_behind,
+            )
+            return self.archive_subtensor
+        return self.subtensor
+
+    def refresh_connections(self) -> None:
+        """Reset all subtensor connections to force re-establishment."""
+        self._subtensor = None
+        self._archive_subtensor = None
+        self._current_block_cache = None
+        logger.info("Subtensor connections reset")
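The file above is new in 0.1.1. For orientation, a hypothetical usage sketch of the provider (assumes a working Bittensor environment and the package's settings; not taken from the package itself):

    # Hypothetical usage sketch of the new provider.
    from abstract_block_dumper._internal.providers.bittensor_client import BittensorConnectionClient

    client = BittensorConnectionClient(network="finney")

    # Blocks more than ARCHIVE_BLOCK_THRESHOLD (300) behind the cached head
    # are routed to the archive subtensor; recent blocks use the regular one.
    subtensor = client.get_subtensor_for_block(block_number=1_000_000)

    # Drop both connections and the cached head, e.g. during periodic cleanup.
    client.refresh_connections()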
@@ -15,7 +15,7 @@ import structlog
 
 import abstract_block_dumper._internal.dal.django_dal as abd_dal
 import abstract_block_dumper._internal.services.utils as abd_utils
-from abstract_block_dumper._internal.services.block_processor import BlockProcessor, block_processor_factory
+from abstract_block_dumper._internal.services.block_processor import BaseBlockProcessor, block_processor_factory
 from abstract_block_dumper._internal.services.metrics import (
     BlockProcessingTimer,
     increment_archive_network_usage,
@@ -59,7 +59,7 @@ class BackfillScheduler:
 
     def __init__(
         self,
-        block_processor: BlockProcessor,
+        block_processor: BaseBlockProcessor,
         network: str,
         from_block: int,
         to_block: int,
@@ -1,4 +1,6 @@
+import itertools
 import time
+from typing import Protocol
 
 import structlog
 from django.db import transaction
@@ -12,6 +14,25 @@ from abstract_block_dumper.models import TaskAttempt
 
 logger = structlog.get_logger(__name__)
 
 
+class BaseBlockProcessor(Protocol):
+    """Protocol defining the interface for block processors."""
+
+    executor: CeleryExecutor
+    registry: BaseRegistry
+
+    def process_block(self, block_number: int) -> None:
+        """Process a single block - executes registered tasks for this block only."""
+        ...
+
+    def process_registry_item(self, registry_item: RegistryItem, block_number: int) -> None:
+        """Process a single registry item for a given block."""
+        ...
+
+    def recover_failed_retries(self, poll_interval: int, batch_size: int | None = None) -> None:
+        """Recover failed tasks that are ready to be retried."""
+        ...
+
+
 class BlockProcessor:
     def __init__(self, executor: CeleryExecutor, registry: BaseRegistry) -> None:
         self.executor = executor
@@ -59,9 +80,9 @@ class BlockProcessor:
         retry_count = 0
         retry_attempts = abd_dal.get_ready_to_retry_attempts()
 
-        # Apply batch size limit if specified
+        # Apply batch size limit if specified (use islice for iterator compatibility)
         if batch_size is not None:
-            retry_attempts = retry_attempts[:batch_size]
+            retry_attempts = itertools.islice(retry_attempts, batch_size)
 
         for retry_attempt in retry_attempts:
             time.sleep(poll_interval)
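Because the retry attempts are now an iterator, slicing with retry_attempts[:batch_size] no longer works; itertools.islice gives the same cap for any iterable. A small self-contained sketch of the pattern (plain Python, not package code):

    # Plain-Python sketch of the batch-limit pattern used above.
    import itertools

    def limit(items, batch_size=None):
        # islice accepts any iterable; a None batch_size means "no limit".
        return itertools.islice(items, batch_size) if batch_size is not None else items

    print(list(limit(iter(range(10)), 3)))  # [0, 1, 2]
    print(list(limit(iter(range(3)))))      # [0, 1, 2]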
@@ -147,7 +168,7 @@ class BlockProcessor:
 def block_processor_factory(
     executor: CeleryExecutor | None = None,
     registry: BaseRegistry | None = None,
-) -> BlockProcessor:
+) -> BaseBlockProcessor:
     return BlockProcessor(
         executor=executor or CeleryExecutor(),
         registry=registry or task_registry,
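BaseBlockProcessor is a typing.Protocol, so BlockProcessor satisfies it structurally without inheriting from it. A hypothetical sketch of what that enables for callers (the run_one_block helper below is illustrative, not part of the package):

    # Hypothetical caller; any object with the same attributes and methods type-checks.
    from abstract_block_dumper._internal.services.block_processor import (
        BaseBlockProcessor,
        block_processor_factory,
    )

    def run_one_block(processor: BaseBlockProcessor, block_number: int) -> None:
        processor.process_block(block_number)

    run_one_block(block_processor_factory(), 100)  # assumes Django and Celery are configured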
@@ -1,12 +1,13 @@
 import time
+from typing import Protocol
 
-import bittensor as bt
 import structlog
+from django import db
 from django.conf import settings
 
 import abstract_block_dumper._internal.dal.django_dal as abd_dal
-import abstract_block_dumper._internal.services.utils as abd_utils
-from abstract_block_dumper._internal.services.block_processor import BlockProcessor, block_processor_factory
+from abstract_block_dumper._internal.providers.bittensor_client import BittensorConnectionClient
+from abstract_block_dumper._internal.services.block_processor import BaseBlockProcessor, block_processor_factory
 from abstract_block_dumper._internal.services.metrics import (
     BlockProcessingTimer,
     increment_blocks_processed,
@@ -15,85 +16,54 @@ from abstract_block_dumper._internal.services.metrics import (
     set_registered_tasks,
 )
 
+# Refresh bittensor connections every N blocks to prevent memory leaks from internal caches
+CONNECTION_REFRESH_INTERVAL = 1000
+
 logger = structlog.get_logger(__name__)
 
-# Blocks older than this threshold from current head require archive network
-ARCHIVE_BLOCK_THRESHOLD = 300
+
+class BlockStateResolver(Protocol):
+    """Protocol defining the interface for block state resolvers."""
+
+    def get_starting_block(self) -> int:
+        """Determine which block to start processing from."""
+        ...
+
+
+class DefaultBlockStateResolver:
+    """Default implementation that reads from settings and database."""
+
+    def __init__(self, bittensor_client: BittensorConnectionClient) -> None:
+        self.bittensor_client = bittensor_client
+
+    def get_starting_block(self) -> int:
+        start_setting = getattr(settings, "BLOCK_DUMPER_START_FROM_BLOCK", None)
+        if start_setting == "current":
+            return self.bittensor_client.subtensor.get_current_block()
+        if isinstance(start_setting, int):
+            return start_setting
+        # Default: resume from DB or current
+        return abd_dal.get_the_latest_executed_block_number() or self.bittensor_client.subtensor.get_current_block()
 
 
 class TaskScheduler:
     def __init__(
         self,
-        block_processor: BlockProcessor,
-        network: str,
+        block_processor: BaseBlockProcessor,
+        bittensor_client: BittensorConnectionClient,
+        state_resolver: BlockStateResolver,
         poll_interval: int,
     ) -> None:
         self.block_processor = block_processor
-        self.network = network
         self.poll_interval = poll_interval
-        self.last_processed_block = -1
+        self.bittensor_client = bittensor_client
+        self.last_processed_block = state_resolver.get_starting_block()
         self.is_running = False
-        self._subtensor: bt.Subtensor | None = None
-        self._archive_subtensor: bt.Subtensor | None = None
-        self._current_block_cache: int | None = None
-
-    @property
-    def subtensor(self) -> bt.Subtensor:
-        """Get the regular subtensor connection, creating it if needed."""
-        if self._subtensor is None:
-            self._subtensor = abd_utils.get_bittensor_client(self.network)
-        return self._subtensor
-
-    @subtensor.setter
-    def subtensor(self, value: bt.Subtensor | None) -> None:
-        """Set or reset the subtensor connection."""
-        self._subtensor = value
-
-    @property
-    def archive_subtensor(self) -> bt.Subtensor:
-        """Get the archive subtensor connection, creating it if needed."""
-        if self._archive_subtensor is None:
-            self._archive_subtensor = abd_utils.get_bittensor_client("archive")
-        return self._archive_subtensor
-
-    @archive_subtensor.setter
-    def archive_subtensor(self, value: bt.Subtensor | None) -> None:
-        """Set or reset the archive subtensor connection."""
-        self._archive_subtensor = value
-
-    def get_subtensor_for_block(self, block_number: int) -> bt.Subtensor:
-        """
-        Get the appropriate subtensor for the given block number.
-
-        Uses archive network for blocks older than ARCHIVE_BLOCK_THRESHOLD
-        from the current head.
-        """
-        if self._current_block_cache is None:
-            self._current_block_cache = self.subtensor.get_current_block()
-
-        blocks_behind = self._current_block_cache - block_number
-
-        if blocks_behind > ARCHIVE_BLOCK_THRESHOLD:
-            logger.debug(
-                "Using archive network for old block",
-                block_number=block_number,
-                blocks_behind=blocks_behind,
-            )
-            return self.archive_subtensor
-        return self.subtensor
-
-    def refresh_connections(self) -> None:
-        """Reset all subtensor connections to force re-establishment."""
-        self._subtensor = None
-        self._archive_subtensor = None
-        self._current_block_cache = None
-        logger.info("Subtensor connections reset")
+        self._blocks_since_refresh = 0
 
     def start(self) -> None:
         self.is_running = True
 
-        self.initialize_last_block()
-
         registered_tasks_count = len(self.block_processor.registry.get_functions())
         set_registered_tasks(registered_tasks_count)
 
@@ -105,12 +75,7 @@ class TaskScheduler:
 
         while self.is_running:
             try:
-                if self._current_block_cache is not None:
-                    self.subtensor = self.get_subtensor_for_block(self._current_block_cache)
-
-                # Update current block cache for archive network decision
-                self._current_block_cache = self.subtensor.get_current_block()
-                current_block = self._current_block_cache
+                current_block = self.bittensor_client.subtensor.get_current_block()
 
                 # Only process the current head block, skip if already processed
                 if current_block != self.last_processed_block:
@@ -121,6 +86,11 @@ class TaskScheduler:
                     increment_blocks_processed("realtime")
                     set_block_lag("realtime", 0)  # Head-only mode has no lag
                     self.last_processed_block = current_block
+                    self._blocks_since_refresh += 1
+
+                    # Periodic memory cleanup
+                    if self._blocks_since_refresh >= CONNECTION_REFRESH_INTERVAL:
+                        self._perform_cleanup()
 
                 time.sleep(self.poll_interval)
 
@@ -129,38 +99,23 @@ class TaskScheduler:
                 self.stop()
                 break
             except Exception:
-                logger.error("Fatal scheduler error", exc_info=True)
-                # resume the loop even if task failed
+                logger.exception("Error in TaskScheduler loop")
                 time.sleep(self.poll_interval)
 
     def stop(self) -> None:
        self.is_running = False
        logger.info("TaskScheduler stopped.")
 
-    def initialize_last_block(self) -> None:
-        # Safe getattr in case setting is not defined
-        start_from_block_setting = getattr(settings, "BLOCK_DUMPER_START_FROM_BLOCK", None)
-
-        if start_from_block_setting is not None:
-            if start_from_block_setting == "current":
-                self.last_processed_block = self.subtensor.get_current_block()
-                logger.info("Starting from current blockchain block", block_number=self.last_processed_block)
+    def _perform_cleanup(self) -> None:
+        """Perform periodic memory cleanup to prevent leaks in long-running processes."""
+        # Reset bittensor connections to clear internal caches
+        self.bittensor_client.refresh_connections()
 
-            elif isinstance(start_from_block_setting, int):
-                self.last_processed_block = start_from_block_setting
-                logger.info("Starting from configured block", block_number=self.last_processed_block)
-            else:
-                error_msg = f"Invalid BLOCK_DUMPER_START_FROM_BLOCK value: {start_from_block_setting}"
-                raise ValueError(error_msg)
-        else:
-            # Default behavior - resume from database
-            last_block_number = abd_dal.get_the_latest_executed_block_number()
+        # Clear Django's query log (only accumulates if DEBUG=True)
+        db.reset_queries()
 
-            self.last_processed_block = last_block_number or self.subtensor.get_current_block()
-            logger.info(
-                "Resume from the last database block or start from the current block",
-                last_processed_block=self.last_processed_block,
-            )
+        self._blocks_since_refresh = 0
+        logger.debug("Memory cleanup performed", blocks_processed=CONNECTION_REFRESH_INTERVAL)
 
 
 def task_scheduler_factory(network: str = "finney") -> TaskScheduler:
@@ -171,8 +126,11 @@ def task_scheduler_factory(network: str = "finney") -> TaskScheduler:
         network (str): Bittensor network name. Defaults to "finney"
 
     """
+    bittensor_client = BittensorConnectionClient(network=network)
+    state_resolver = DefaultBlockStateResolver(bittensor_client=bittensor_client)
    return TaskScheduler(
        block_processor=block_processor_factory(),
-        network=network,
-        poll_interval=getattr(settings, "BLOCK_DUMPER_POLL_INTERVAL", 1),
+        poll_interval=getattr(settings, "BLOCK_DUMPER_POLL_INTERVAL", 5),
+        bittensor_client=bittensor_client,
+        state_resolver=state_resolver,
     )
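The factory above now wires three collaborators together. A condensed sketch of the resulting construction (values are the defaults visible in this diff; illustrative only, assuming a configured Django project):

    # Condensed sketch of what task_scheduler_factory("finney") now builds.
    client = BittensorConnectionClient(network="finney")
    resolver = DefaultBlockStateResolver(bittensor_client=client)
    scheduler = TaskScheduler(
        block_processor=block_processor_factory(),
        bittensor_client=client,
        state_resolver=resolver,
        poll_interval=5,  # new default when BLOCK_DUMPER_POLL_INTERVAL is unset (was 1)
    )
    # start() polls the head block and calls _perform_cleanup() every
    # CONNECTION_REFRESH_INTERVAL (1000) processed blocks.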
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
 commit_id: COMMIT_ID
 __commit_id__: COMMIT_ID
 
-__version__ = version = '0.1.0'
-__version_tuple__ = version_tuple = (0, 1, 0)
+__version__ = version = '0.1.1'
+__version_tuple__ = version_tuple = (0, 1, 1)
 
 __commit_id__ = commit_id = None
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: abstract-block-dumper
-Version: 0.1.0
+Version: 0.1.1
 Project-URL: Source, https://github.com/bactensor/abstract-block-dumper
 Project-URL: Issue Tracker, https://github.com/bactensor/abstract-block-dumper/issues
 Author-email: Reef Technologies <opensource@reef.pl>
@@ -1,5 +1,5 @@
 abstract_block_dumper/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-abstract_block_dumper/_version.py,sha256=5jwwVncvCiTnhOedfkzzxmxsggwmTBORdFL_4wq0ZeY,704
+abstract_block_dumper/_version.py,sha256=m8HxkqoKGw_wAJtc4ZokpJKNLXqp4zwnNhbnfDtro7w,704
 abstract_block_dumper/admin.py,sha256=3J3I_QOKFgfMNpTXW-rTQGO_q5Ls6uNuL0FkPVdIsYg,1654
 abstract_block_dumper/apps.py,sha256=DXATdrjsL3T2IletTbKeD6unr8ScLaxg7wz0nAHTAns,215
 abstract_block_dumper/models.py,sha256=MO9824dmHB6xF3PrFE_RERh7whVjQtS4tt6QA0wSbg0,2022
@@ -8,14 +8,16 @@ abstract_block_dumper/_internal/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm
 abstract_block_dumper/_internal/discovery.py,sha256=sISOL8vq6rC0pOndrCfWKDZjyYwzzZIChG-BH9mteq0,745
 abstract_block_dumper/_internal/exceptions.py,sha256=jVXQ8b3gneno2XYvO0XisJPMlkAWb6H5u10egIpPJ4k,335
 abstract_block_dumper/_internal/dal/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-abstract_block_dumper/_internal/dal/django_dal.py,sha256=i9jocanfptjXw5lfE2xBYvx5mo1g98IoMjlS-WjGP88,5623
+abstract_block_dumper/_internal/dal/django_dal.py,sha256=QbDsikUthIAhVC_FwSynUUdQL3OWlCo3_Cg65M91Cb4,5618
 abstract_block_dumper/_internal/dal/memory_registry.py,sha256=m9Yms-cuemi9_5q_Kn_zsJnxDPEiuAUkESIAltD60QY,2943
+abstract_block_dumper/_internal/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+abstract_block_dumper/_internal/providers/bittensor_client.py,sha256=hkUdkhz8wOZInRloyySo3DDN4JSMEiPwLv0YV5IDZ7o,2798
 abstract_block_dumper/_internal/services/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-abstract_block_dumper/_internal/services/backfill_scheduler.py,sha256=XgsVYXaz6pR4PBA9giesjhR74x1qLX2281-eQgM5qhs,16311
-abstract_block_dumper/_internal/services/block_processor.py,sha256=NC7p1oD38FpaZb6EbykBolP32uY069abumOvXrjOBV0,6644
+abstract_block_dumper/_internal/services/backfill_scheduler.py,sha256=uZqM4bSoMgNRKwBy-W3a7ydh2Z37Sum8cDRGo4T4KhA,16319
+abstract_block_dumper/_internal/services/block_processor.py,sha256=P8_LZR4ZSyNKbtnqFbAUkpT1XmEh9yX5Pgj5J__IwdA,7409
 abstract_block_dumper/_internal/services/executor.py,sha256=WhpHhOAi4cI-qdEE8-DSt9xZwooOpSc9_uDMQBBoHUM,2317
 abstract_block_dumper/_internal/services/metrics.py,sha256=Gg-PQYZ98caaS52wm1EqhtPURXlfrVjk2t3-8nccqfo,7821
-abstract_block_dumper/_internal/services/scheduler.py,sha256=pFVNV1YBHujNIRW9kq_1bxaa_W1Bn_pr7DrgwKaUItw,6779
+abstract_block_dumper/_internal/services/scheduler.py,sha256=pkcOlh0XsR-3sStEtTbiSpHFJhMG9tGIgSUFeH-FITk,5020
 abstract_block_dumper/_internal/services/utils.py,sha256=QZxdQyWIcUnezyVmegS4g3x3BoB3-oijYJ_i9nLQWHo,1140
 abstract_block_dumper/management/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 abstract_block_dumper/management/commands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -27,6 +29,6 @@ abstract_block_dumper/v1/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 abstract_block_dumper/v1/celery.py,sha256=X4IqVs5i6ZpyY7fy1SqMZgsZy4SXP-jK2qG-FYnjU38,1722
 abstract_block_dumper/v1/decorators.py,sha256=yQglsy1dU1u7ShwaTqZLahDcybHmetibTIOi53o_ZOM,9829
 abstract_block_dumper/v1/tasks.py,sha256=u9iMYdDUqzYT3yPrNwZecHnlweZ3yFipV9BcIWHCbus,2647
-abstract_block_dumper-0.1.0.dist-info/METADATA,sha256=UypVClIjNRQlbzkGmmR2Indrw3c-2gbrlbjHaXin_ys,12993
-abstract_block_dumper-0.1.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-abstract_block_dumper-0.1.0.dist-info/RECORD,,
+abstract_block_dumper-0.1.1.dist-info/METADATA,sha256=CVrs2LOv57Y4JOGxJgKsr1Xnnhl0i0wa534T-0hTU58,12993
+abstract_block_dumper-0.1.1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+abstract_block_dumper-0.1.1.dist-info/RECORD,,