abstract-block-dumper 0.1.0__py3-none-any.whl → 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,8 +1,10 @@
+ from collections.abc import Iterator
  from datetime import timedelta
  from typing import Any

  from django.conf import settings
  from django.db import transaction
+ from django.db.models import Max
  from django.db.models.query import QuerySet
  from django.utils import timezone

@@ -10,19 +12,21 @@ import abstract_block_dumper._internal.services.utils as abd_utils
  import abstract_block_dumper.models as abd_models


- def get_ready_to_retry_attempts() -> QuerySet[abd_models.TaskAttempt]:
-     return abd_models.TaskAttempt.objects.filter(
-         next_retry_at__isnull=False,
-         next_retry_at__lte=timezone.now(),
-         attempt_count__lt=abd_utils.get_max_attempt_limit(),
-     ).exclude(
-         status=abd_models.TaskAttempt.Status.SUCCESS,
+ def get_ready_to_retry_attempts() -> Iterator[abd_models.TaskAttempt]:
+     return (
+         abd_models.TaskAttempt.objects.filter(
+             next_retry_at__isnull=False,
+             next_retry_at__lte=timezone.now(),
+             attempt_count__lt=abd_utils.get_max_attempt_limit(),
+         )
+         .exclude(
+             status=abd_models.TaskAttempt.Status.SUCCESS,
+         )
+         .iterator()
      )


  def executed_block_numbers(executable_path: str, args_json: str, from_block: int, to_block: int) -> set[int]:
-     # Use iterator() to avoid Django's QuerySet caching which causes memory leaks
-     # during long-running backfill operations
      block_numbers = (
          abd_models.TaskAttempt.objects.filter(
              executable_path=executable_path,
@@ -151,7 +155,5 @@ def task_create_or_get_pending(


  def get_the_latest_executed_block_number() -> int | None:
-     qs = abd_models.TaskAttempt.objects.order_by("-block_number").first()
-     if qs:
-         return qs.block_number
-     return None
+     result = abd_models.TaskAttempt.objects.aggregate(max_block=Max("block_number"))
+     return result["max_block"]
@@ -0,0 +1,119 @@
+ from __future__ import annotations
+
+ from typing import TYPE_CHECKING
+
+ import bittensor as bt
+ import structlog
+
+ import abstract_block_dumper._internal.services.utils as abd_utils
+
+ if TYPE_CHECKING:
+     import types
+
+ logger = structlog.get_logger(__name__)
+
+
+ # Blocks older than this threshold from current head require archive network
+ ARCHIVE_BLOCK_THRESHOLD = 300
+
+
+ class BittensorConnectionClient:
+     """
+     Manages connections to regular and archive Bittensor subtensor networks.
+
+     Supports context manager protocol for safe connection cleanup:
+         with BittensorConnectionClient(network="finney") as client:
+             block = client.subtensor.get_current_block()
+     """
+
+     def __init__(self, network: str) -> None:
+         self.network = network
+         self._subtensor: bt.Subtensor | None = None
+         self._archive_subtensor: bt.Subtensor | None = None
+         self._current_block_cache: int | None = None
+
+     def __enter__(self) -> BittensorConnectionClient:
+         """Context manager entry."""
+         return self
+
+     def __exit__(
+         self,
+         _exc_type: type[BaseException] | None,
+         _exc_val: BaseException | None,
+         _exc_tb: types.TracebackType | None,
+     ) -> None:
+         """Context manager exit - ensures connections are closed."""
+         self.close()
+
+     def close(self) -> None:
+         """Close all subtensor connections to prevent memory leaks."""
+         if self._subtensor is not None:
+             try:
+                 self._subtensor.close()
+             except Exception:
+                 logger.warning("Error closing subtensor connection", exc_info=True)
+             self._subtensor = None
+
+         if self._archive_subtensor is not None:
+             try:
+                 self._archive_subtensor.close()
+             except Exception:
+                 logger.warning("Error closing archive subtensor connection", exc_info=True)
+             self._archive_subtensor = None
+
+         self._current_block_cache = None
+         logger.debug("Subtensor connections closed")
+
+     def get_for_block(self, block_number: int) -> bt.Subtensor:
+         """Get the appropriate subtensor client for the given block number."""
+         raise NotImplementedError
+
+     @property
+     def subtensor(self) -> bt.Subtensor:
+         """Get the regular subtensor connection, creating it if needed."""
+         if self._subtensor is None:
+             self._subtensor = abd_utils.get_bittensor_client(self.network)
+         return self._subtensor
+
+     @subtensor.setter
+     def subtensor(self, value: bt.Subtensor | None) -> None:
+         """Set or reset the subtensor connection."""
+         self._subtensor = value
+
+     @property
+     def archive_subtensor(self) -> bt.Subtensor:
+         """Get the archive subtensor connection, creating it if needed."""
+         if self._archive_subtensor is None:
+             self._archive_subtensor = abd_utils.get_bittensor_client("archive")
+         return self._archive_subtensor
+
+     @archive_subtensor.setter
+     def archive_subtensor(self, value: bt.Subtensor | None) -> None:
+         """Set or reset the archive subtensor connection."""
+         self._archive_subtensor = value
+
+     def get_subtensor_for_block(self, block_number: int) -> bt.Subtensor:
+         """
+         Get the appropriate subtensor for the given block number.
+
+         Uses archive network for blocks older than ARCHIVE_BLOCK_THRESHOLD
+         from the current head.
+         """
+         if self._current_block_cache is None:
+             self._current_block_cache = self.subtensor.get_current_block()
+
+         blocks_behind = self._current_block_cache - block_number
+
+         if blocks_behind > ARCHIVE_BLOCK_THRESHOLD:
+             logger.debug(
+                 "Using archive network for old block",
+                 block_number=block_number,
+                 blocks_behind=blocks_behind,
+             )
+             return self.archive_subtensor
+         return self.subtensor
+
+     def refresh_connections(self) -> None:
+         """Close and reset all subtensor connections to force re-establishment."""
+         self.close()
+         logger.info("Subtensor connections refreshed")
@@ -15,7 +15,7 @@ import structlog

  import abstract_block_dumper._internal.dal.django_dal as abd_dal
  import abstract_block_dumper._internal.services.utils as abd_utils
- from abstract_block_dumper._internal.services.block_processor import BlockProcessor, block_processor_factory
+ from abstract_block_dumper._internal.services.block_processor import BaseBlockProcessor, block_processor_factory
  from abstract_block_dumper._internal.services.metrics import (
      BlockProcessingTimer,
      increment_archive_network_usage,
@@ -36,8 +36,6 @@ logger = structlog.get_logger(__name__)
  # Blocks older than this threshold from current head require archive network
  ARCHIVE_BLOCK_THRESHOLD = 300

- # Progress logging interval
- PROGRESS_LOG_INTERVAL = 100
  ARCHIVE_NETWORK = "archive"

  # Memory cleanup interval (every N blocks)
@@ -59,7 +57,7 @@ class BackfillScheduler:

      def __init__(
          self,
-         block_processor: BlockProcessor,
+         block_processor: BaseBlockProcessor,
          network: str,
          from_block: int,
          to_block: int,
@@ -281,16 +279,13 @@ class BackfillScheduler:
              if self._current_head_cache:
                  set_block_lag("backfill", self._current_head_cache - block_number)

-             # Log progress periodically
-             if processed_count % PROGRESS_LOG_INTERVAL == 0:
-                 progress_pct = (processed_count / total_blocks) * 100
-                 logger.info(
-                     "Backfill progress",
-                     processed=processed_count,
-                     total=total_blocks,
-                     progress_percent=f"{progress_pct:.1f}%",
-                     current_block=block_number,
-                 )
+             # Log each block being processed
+             progress_pct = (processed_count / total_blocks) * 100
+             logger.info(
+                 "Backfilling block",
+                 block=block_number,
+                 progress=f"{processed_count}/{total_blocks} ({progress_pct:.1f}%)",
+             )

              # Rate limiting between block submissions
              if block_number < self.to_block and self.rate_limit > 0:
@@ -1,4 +1,6 @@
+ import itertools
  import time
+ from typing import Protocol

  import structlog
  from django.db import transaction
@@ -12,6 +14,25 @@ from abstract_block_dumper.models import TaskAttempt
  logger = structlog.get_logger(__name__)


+ class BaseBlockProcessor(Protocol):
+     """Protocol defining the interface for block processors."""
+
+     executor: CeleryExecutor
+     registry: BaseRegistry
+
+     def process_block(self, block_number: int) -> None:
+         """Process a single block - executes registered tasks for this block only."""
+         ...
+
+     def process_registry_item(self, registry_item: RegistryItem, block_number: int) -> None:
+         """Process a single registry item for a given block."""
+         ...
+
+     def recover_failed_retries(self, poll_interval: int, batch_size: int | None = None) -> None:
+         """Recover failed tasks that are ready to be retried."""
+         ...
+
+
  class BlockProcessor:
      def __init__(self, executor: CeleryExecutor, registry: BaseRegistry) -> None:
@@ -59,9 +80,9 @@ class BlockProcessor:
          retry_count = 0
          retry_attempts = abd_dal.get_ready_to_retry_attempts()

-         # Apply batch size limit if specified
+         # Apply batch size limit if specified (use islice for iterator compatibility)
          if batch_size is not None:
-             retry_attempts = retry_attempts[:batch_size]
+             retry_attempts = itertools.islice(retry_attempts, batch_size)

          for retry_attempt in retry_attempts:
              time.sleep(poll_interval)
@@ -147,7 +168,7 @@ class BlockProcessor:
  def block_processor_factory(
      executor: CeleryExecutor | None = None,
      registry: BaseRegistry | None = None,
- ) -> BlockProcessor:
+ ) -> BaseBlockProcessor:
      return BlockProcessor(
          executor=executor or CeleryExecutor(),
          registry=registry or task_registry,
@@ -1,12 +1,12 @@
  import time
+ from typing import Protocol

- import bittensor as bt
  import structlog
  from django.conf import settings

  import abstract_block_dumper._internal.dal.django_dal as abd_dal
- import abstract_block_dumper._internal.services.utils as abd_utils
- from abstract_block_dumper._internal.services.block_processor import BlockProcessor, block_processor_factory
+ from abstract_block_dumper._internal.providers.bittensor_client import BittensorConnectionClient
+ from abstract_block_dumper._internal.services.block_processor import BaseBlockProcessor, block_processor_factory
  from abstract_block_dumper._internal.services.metrics import (
      BlockProcessingTimer,
      increment_blocks_processed,
@@ -17,83 +17,49 @@ from abstract_block_dumper._internal.services.metrics import (

  logger = structlog.get_logger(__name__)

- # Blocks older than this threshold from current head require archive network
- ARCHIVE_BLOCK_THRESHOLD = 300
+
+ class BlockStateResolver(Protocol):
+     """Protocol defining the interface for block state resolvers."""
+
+     def get_starting_block(self) -> int:
+         """Determine which block to start processing from."""
+         ...
+
+
+ class DefaultBlockStateResolver:
+     """Default implementation that reads from settings and database."""
+
+     def __init__(self, bittensor_client: BittensorConnectionClient) -> None:
+         self.bittensor_client = bittensor_client
+
+     def get_starting_block(self) -> int:
+         start_setting = getattr(settings, "BLOCK_DUMPER_START_FROM_BLOCK", None)
+         if start_setting == "current":
+             return self.bittensor_client.subtensor.get_current_block()
+         if isinstance(start_setting, int):
+             return start_setting
+
+         # Default: resume from DB or current
+         return abd_dal.get_the_latest_executed_block_number() or self.bittensor_client.subtensor.get_current_block()


  class TaskScheduler:
      def __init__(
          self,
-         block_processor: BlockProcessor,
-         network: str,
+         block_processor: BaseBlockProcessor,
+         bittensor_client: BittensorConnectionClient,
+         state_resolver: BlockStateResolver,
          poll_interval: int,
      ) -> None:
          self.block_processor = block_processor
-         self.network = network
          self.poll_interval = poll_interval
-         self.last_processed_block = -1
+         self.bittensor_client = bittensor_client
+         self.last_processed_block = state_resolver.get_starting_block()
          self.is_running = False
-         self._subtensor: bt.Subtensor | None = None
-         self._archive_subtensor: bt.Subtensor | None = None
-         self._current_block_cache: int | None = None
-
-     @property
-     def subtensor(self) -> bt.Subtensor:
-         """Get the regular subtensor connection, creating it if needed."""
-         if self._subtensor is None:
-             self._subtensor = abd_utils.get_bittensor_client(self.network)
-         return self._subtensor
-
-     @subtensor.setter
-     def subtensor(self, value: bt.Subtensor | None) -> None:
-         """Set or reset the subtensor connection."""
-         self._subtensor = value
-
-     @property
-     def archive_subtensor(self) -> bt.Subtensor:
-         """Get the archive subtensor connection, creating it if needed."""
-         if self._archive_subtensor is None:
-             self._archive_subtensor = abd_utils.get_bittensor_client("archive")
-         return self._archive_subtensor
-
-     @archive_subtensor.setter
-     def archive_subtensor(self, value: bt.Subtensor | None) -> None:
-         """Set or reset the archive subtensor connection."""
-         self._archive_subtensor = value
-
-     def get_subtensor_for_block(self, block_number: int) -> bt.Subtensor:
-         """
-         Get the appropriate subtensor for the given block number.
-
-         Uses archive network for blocks older than ARCHIVE_BLOCK_THRESHOLD
-         from the current head.
-         """
-         if self._current_block_cache is None:
-             self._current_block_cache = self.subtensor.get_current_block()
-
-         blocks_behind = self._current_block_cache - block_number
-
-         if blocks_behind > ARCHIVE_BLOCK_THRESHOLD:
-             logger.debug(
-                 "Using archive network for old block",
-                 block_number=block_number,
-                 blocks_behind=blocks_behind,
-             )
-             return self.archive_subtensor
-         return self.subtensor
-
-     def refresh_connections(self) -> None:
-         """Reset all subtensor connections to force re-establishment."""
-         self._subtensor = None
-         self._archive_subtensor = None
-         self._current_block_cache = None
-         logger.info("Subtensor connections reset")

      def start(self) -> None:
          self.is_running = True

-         self.initialize_last_block()
-
          registered_tasks_count = len(self.block_processor.registry.get_functions())
          set_registered_tasks(registered_tasks_count)

@@ -105,12 +71,7 @@ class TaskScheduler:

          while self.is_running:
              try:
-                 if self._current_block_cache is not None:
-                     self.subtensor = self.get_subtensor_for_block(self._current_block_cache)
-
-                 # Update current block cache for archive network decision
-                 self._current_block_cache = self.subtensor.get_current_block()
-                 current_block = self._current_block_cache
+                 current_block = self.bittensor_client.subtensor.get_current_block()

                  # Only process the current head block, skip if already processed
                  if current_block != self.last_processed_block:
@@ -129,39 +90,14 @@ class TaskScheduler:
                  self.stop()
                  break
              except Exception:
-                 logger.error("Fatal scheduler error", exc_info=True)
-                 # resume the loop even if task failed
+                 logger.exception("Error in TaskScheduler loop")
                  time.sleep(self.poll_interval)

      def stop(self) -> None:
          self.is_running = False
+         self.bittensor_client.close()
          logger.info("TaskScheduler stopped.")

-     def initialize_last_block(self) -> None:
-         # Safe getattr in case setting is not defined
-         start_from_block_setting = getattr(settings, "BLOCK_DUMPER_START_FROM_BLOCK", None)
-
-         if start_from_block_setting is not None:
-             if start_from_block_setting == "current":
-                 self.last_processed_block = self.subtensor.get_current_block()
-                 logger.info("Starting from current blockchain block", block_number=self.last_processed_block)
-
-             elif isinstance(start_from_block_setting, int):
-                 self.last_processed_block = start_from_block_setting
-                 logger.info("Starting from configured block", block_number=self.last_processed_block)
-             else:
-                 error_msg = f"Invalid BLOCK_DUMPER_START_FROM_BLOCK value: {start_from_block_setting}"
-                 raise ValueError(error_msg)
-         else:
-             # Default behavior - resume from database
-             last_block_number = abd_dal.get_the_latest_executed_block_number()
-
-             self.last_processed_block = last_block_number or self.subtensor.get_current_block()
-             logger.info(
-                 "Resume from the last database block or start from the current block",
-                 last_processed_block=self.last_processed_block,
-             )
-


  def task_scheduler_factory(network: str = "finney") -> TaskScheduler:
@@ -171,8 +107,11 @@ def task_scheduler_factory(network: str = "finney") -> TaskScheduler:
          network (str): Bittensor network name. Defaults to "finney"

      """
+     bittensor_client = BittensorConnectionClient(network=network)
+     state_resolver = DefaultBlockStateResolver(bittensor_client=bittensor_client)
      return TaskScheduler(
          block_processor=block_processor_factory(),
-         network=network,
-         poll_interval=getattr(settings, "BLOCK_DUMPER_POLL_INTERVAL", 1),
+         poll_interval=getattr(settings, "BLOCK_DUMPER_POLL_INTERVAL", 5),
+         bittensor_client=bittensor_client,
+         state_resolver=state_resolver,
      )
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
  commit_id: COMMIT_ID
  __commit_id__: COMMIT_ID

- __version__ = version = '0.1.0'
- __version_tuple__ = version_tuple = (0, 1, 0)
+ __version__ = version = '0.1.2'
+ __version_tuple__ = version_tuple = (0, 1, 2)

  __commit_id__ = commit_id = None
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: abstract-block-dumper
- Version: 0.1.0
+ Version: 0.1.2
  Project-URL: Source, https://github.com/bactensor/abstract-block-dumper
  Project-URL: Issue Tracker, https://github.com/bactensor/abstract-block-dumper/issues
  Author-email: Reef Technologies <opensource@reef.pl>
@@ -1,5 +1,5 @@
  abstract_block_dumper/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- abstract_block_dumper/_version.py,sha256=5jwwVncvCiTnhOedfkzzxmxsggwmTBORdFL_4wq0ZeY,704
+ abstract_block_dumper/_version.py,sha256=Ok5oAXdWgR9aghaFXTafTeDW6sYO3uVe6d2Nket57R4,704
  abstract_block_dumper/admin.py,sha256=3J3I_QOKFgfMNpTXW-rTQGO_q5Ls6uNuL0FkPVdIsYg,1654
  abstract_block_dumper/apps.py,sha256=DXATdrjsL3T2IletTbKeD6unr8ScLaxg7wz0nAHTAns,215
  abstract_block_dumper/models.py,sha256=MO9824dmHB6xF3PrFE_RERh7whVjQtS4tt6QA0wSbg0,2022
@@ -8,14 +8,16 @@ abstract_block_dumper/_internal/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm
  abstract_block_dumper/_internal/discovery.py,sha256=sISOL8vq6rC0pOndrCfWKDZjyYwzzZIChG-BH9mteq0,745
  abstract_block_dumper/_internal/exceptions.py,sha256=jVXQ8b3gneno2XYvO0XisJPMlkAWb6H5u10egIpPJ4k,335
  abstract_block_dumper/_internal/dal/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- abstract_block_dumper/_internal/dal/django_dal.py,sha256=i9jocanfptjXw5lfE2xBYvx5mo1g98IoMjlS-WjGP88,5623
+ abstract_block_dumper/_internal/dal/django_dal.py,sha256=QbDsikUthIAhVC_FwSynUUdQL3OWlCo3_Cg65M91Cb4,5618
  abstract_block_dumper/_internal/dal/memory_registry.py,sha256=m9Yms-cuemi9_5q_Kn_zsJnxDPEiuAUkESIAltD60QY,2943
+ abstract_block_dumper/_internal/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ abstract_block_dumper/_internal/providers/bittensor_client.py,sha256=wlKjFrGN4Q2DfQyD_Fx-eH83ZMB6AbzLs5keYq6FGUw,4124
  abstract_block_dumper/_internal/services/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- abstract_block_dumper/_internal/services/backfill_scheduler.py,sha256=XgsVYXaz6pR4PBA9giesjhR74x1qLX2281-eQgM5qhs,16311
- abstract_block_dumper/_internal/services/block_processor.py,sha256=NC7p1oD38FpaZb6EbykBolP32uY069abumOvXrjOBV0,6644
+ abstract_block_dumper/_internal/services/backfill_scheduler.py,sha256=3X9NRXCamnb1jUI47sUXqmsrQ1nBzK0BLKe5CeLt00E,16091
+ abstract_block_dumper/_internal/services/block_processor.py,sha256=P8_LZR4ZSyNKbtnqFbAUkpT1XmEh9yX5Pgj5J__IwdA,7409
  abstract_block_dumper/_internal/services/executor.py,sha256=WhpHhOAi4cI-qdEE8-DSt9xZwooOpSc9_uDMQBBoHUM,2317
  abstract_block_dumper/_internal/services/metrics.py,sha256=Gg-PQYZ98caaS52wm1EqhtPURXlfrVjk2t3-8nccqfo,7821
- abstract_block_dumper/_internal/services/scheduler.py,sha256=pFVNV1YBHujNIRW9kq_1bxaa_W1Bn_pr7DrgwKaUItw,6779
+ abstract_block_dumper/_internal/services/scheduler.py,sha256=BIQ7c-HYSebW3CKq5ynsMZjULEO9c5YP0qWFN1aqg24,4164
  abstract_block_dumper/_internal/services/utils.py,sha256=QZxdQyWIcUnezyVmegS4g3x3BoB3-oijYJ_i9nLQWHo,1140
  abstract_block_dumper/management/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  abstract_block_dumper/management/commands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -27,6 +29,6 @@ abstract_block_dumper/v1/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
  abstract_block_dumper/v1/celery.py,sha256=X4IqVs5i6ZpyY7fy1SqMZgsZy4SXP-jK2qG-FYnjU38,1722
  abstract_block_dumper/v1/decorators.py,sha256=yQglsy1dU1u7ShwaTqZLahDcybHmetibTIOi53o_ZOM,9829
  abstract_block_dumper/v1/tasks.py,sha256=u9iMYdDUqzYT3yPrNwZecHnlweZ3yFipV9BcIWHCbus,2647
- abstract_block_dumper-0.1.0.dist-info/METADATA,sha256=UypVClIjNRQlbzkGmmR2Indrw3c-2gbrlbjHaXin_ys,12993
- abstract_block_dumper-0.1.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- abstract_block_dumper-0.1.0.dist-info/RECORD,,
+ abstract_block_dumper-0.1.2.dist-info/METADATA,sha256=6y5tq_8Wp3JNHuYATBIx_XQ2I0mXqBU5tgyf97rHahc,12993
+ abstract_block_dumper-0.1.2.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ abstract_block_dumper-0.1.2.dist-info/RECORD,,