abstract-block-dumper 0.0.6__py3-none-any.whl → 0.0.8__py3-none-any.whl

This diff shows the changes between the publicly released 0.0.6 and 0.0.8 wheels of abstract-block-dumper, as they appear in their public registry.
@@ -21,13 +21,19 @@ def get_ready_to_retry_attempts() -> QuerySet[abd_models.TaskAttempt]:
 
 
 def executed_block_numbers(executable_path: str, args_json: str, from_block: int, to_block: int) -> set[int]:
-    block_numbers = abd_models.TaskAttempt.objects.filter(
-        executable_path=executable_path,
-        args_json=args_json,
-        block_number__gte=from_block,
-        block_number__lt=to_block,
-        status=abd_models.TaskAttempt.Status.SUCCESS,
-    ).values_list("block_number", flat=True)
+    # Use iterator() to avoid Django's QuerySet caching which causes memory leaks
+    # during long-running backfill operations
+    block_numbers = (
+        abd_models.TaskAttempt.objects.filter(
+            executable_path=executable_path,
+            args_json=args_json,
+            block_number__gte=from_block,
+            block_number__lt=to_block,
+            status=abd_models.TaskAttempt.Status.SUCCESS,
+        )
+        .values_list("block_number", flat=True)
+        .iterator()
+    )
     return set(block_numbers)
 
 
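
The hunk above swaps a plain QuerySet for QuerySet.iterator(). A minimal standalone sketch of the difference, using the TaskAttempt model from this package; the chunk_size value is illustrative:

    from abstract_block_dumper.models import TaskAttempt

    qs = TaskAttempt.objects.filter(
        status=TaskAttempt.Status.SUCCESS,
    ).values_list("block_number", flat=True)

    # A plain QuerySet caches every fetched row on the object itself,
    # so a large backfill range stays in memory for as long as the
    # QuerySet is referenced.
    cached = set(qs)

    # iterator() streams rows from the database cursor in chunks and
    # retains nothing on the QuerySet; only the resulting set survives.
    streamed = set(qs.iterator(chunk_size=2000))
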
@@ -23,15 +23,14 @@ class RegistryItem:
         """Check if condition matches for given block and arguments."""
         try:
             return self.condition(block_number, **kwargs)
-        except Exception as e:
-            logger.error(
+        except Exception as exc:
+            logger.exception(
                 "Condition evaluation failed",
                 condition=self.function.__name__,
                 block_number=block_number,
-                exc_info=True,
             )
-            msg = f"Failed to evaluate condition: {e}"
-            raise ConditionEvaluationError(msg) from e
+            msg = "Failed to evaluate condition"
+            raise ConditionEvaluationError(msg) from exc
 
     def get_execution_args(self) -> list[dict[str, Any]]:
         """Get list of argument sets for execution."""
@@ -0,0 +1,438 @@
+"""
+Backfill scheduler for historical block processing.
+
+This module provides a dedicated scheduler for backfilling historical blocks
+with rate limiting and automatic archive network switching.
+"""
+
+from __future__ import annotations
+
+import time
+from dataclasses import dataclass
+from typing import TYPE_CHECKING
+
+import structlog
+
+import abstract_block_dumper._internal.dal.django_dal as abd_dal
+import abstract_block_dumper._internal.services.utils as abd_utils
+from abstract_block_dumper._internal.services.block_processor import BlockProcessor, block_processor_factory
+from abstract_block_dumper._internal.services.metrics import (
+    BlockProcessingTimer,
+    increment_blocks_processed,
+    set_backfill_progress,
+    set_current_block,
+)
+from abstract_block_dumper._internal.services.utils import serialize_args
+
+if TYPE_CHECKING:
+    import bittensor as bt
+
+    from abstract_block_dumper._internal.dal.memory_registry import RegistryItem
+
+logger = structlog.get_logger(__name__)
+
+# Blocks older than this threshold from current head require archive network
+ARCHIVE_BLOCK_THRESHOLD = 300
+
+# Progress logging interval
+PROGRESS_LOG_INTERVAL = 100
+ARCHIVE_NETWORK = "archive"
+
+# Memory cleanup interval (every N blocks)
+MEMORY_CLEANUP_INTERVAL = 1000
+
+
+@dataclass
+class DryRunStats:
+    """Statistics for dry-run mode."""
+
+    total_blocks: int = 0
+    already_processed: int = 0
+    blocks_needing_tasks: int = 0
+    estimated_tasks: int = 0
+
+
+class BackfillScheduler:
+    """Scheduler for backfilling historical blocks with rate limiting."""
+
+    def __init__(
+        self,
+        block_processor: BlockProcessor,
+        network: str,
+        from_block: int,
+        to_block: int,
+        rate_limit: float = 1.0,
+        dry_run: bool = False,
+    ) -> None:
+        """
+        Initialize the backfill scheduler.
+
+        Args:
+            block_processor: The block processor to use for task execution.
+            network: The bittensor network name (e.g., 'finney').
+            from_block: Starting block number (inclusive).
+            to_block: Ending block number (inclusive).
+            rate_limit: Seconds to sleep between processing each block.
+            dry_run: If True, preview what would be processed without executing.
+
+        """
+        self.block_processor = block_processor
+        self.network = network
+        self.from_block = from_block
+        self.to_block = to_block
+        self.rate_limit = rate_limit
+        self.dry_run = dry_run
+        self.is_running = False
+        self._subtensor: bt.Subtensor | None = None
+        self._archive_subtensor: bt.Subtensor | None = None
+        self._current_head_cache: int | None = None
+
+    @property
+    def subtensor(self) -> bt.Subtensor:
+        """Get the regular subtensor connection, creating it if needed."""
+        if self._subtensor is None:
+            self._subtensor = abd_utils.get_bittensor_client(self.network)
+        return self._subtensor
+
+    @property
+    def archive_subtensor(self) -> bt.Subtensor:
+        """Get the archive subtensor connection, creating it if needed."""
+        if self._archive_subtensor is None:
+            self._archive_subtensor = abd_utils.get_bittensor_client("archive")
+        return self._archive_subtensor
+
+    def get_subtensor_for_block(self, block_number: int) -> bt.Subtensor:
+        """
+        Get the appropriate subtensor for the given block number.
+
+        Uses archive network for blocks older than ARCHIVE_BLOCK_THRESHOLD
+        from the current head.
+        """
+        if self._current_head_cache is None:
+            self._current_head_cache = self.subtensor.get_current_block()
+
+        blocks_behind = self._current_head_cache - block_number
+
+        if blocks_behind > ARCHIVE_BLOCK_THRESHOLD:
+            logger.debug(
+                "Using archive network for old block",
+                block_number=block_number,
+                blocks_behind=blocks_behind,
+            )
+            return self.archive_subtensor
+        return self.subtensor
+
+    def _get_network_type_for_block(self, block_number: int) -> str:
+        """Get the network type string for a block (for display purposes)."""
+        if self._current_head_cache is None:
+            self._current_head_cache = self.subtensor.get_current_block()
+
+        blocks_behind = self._current_head_cache - block_number
+        return ARCHIVE_NETWORK if blocks_behind > ARCHIVE_BLOCK_THRESHOLD else self.network
+
+    def start(self) -> DryRunStats | None:
+        """
+        Start processing blocks from from_block to to_block.
+
+        Returns:
+            DryRunStats if dry_run is True, None otherwise.
+
+        """
+        self.is_running = True
+
+        # Refresh current head for accurate archive network decisions
+        self._current_head_cache = self.subtensor.get_current_block()
+
+        total_blocks = self.to_block - self.from_block + 1
+        network_type = self._get_network_type_for_block(self.from_block)
+
+        logger.info(
+            "BackfillScheduler starting",
+            from_block=self.from_block,
+            to_block=self.to_block,
+            total_blocks=total_blocks,
+            rate_limit=self.rate_limit,
+            dry_run=self.dry_run,
+            network_type=network_type,
+            current_head=self._current_head_cache,
+        )
+
+        if self.dry_run:
+            return self._run_dry_run()
+
+        self._run_backfill()
+        return None
+
+    def _run_dry_run(self) -> DryRunStats:
+        """
+        Run in dry-run mode to preview what would be processed.
+
+        Optimized to fetch all executed blocks in one query per registry item,
+        instead of querying for each block individually.
+        """
+        stats = DryRunStats(total_blocks=self.to_block - self.from_block + 1)
+
+        registry_items = self.block_processor.registry.get_functions()
+
+        # Pre-fetch all executed blocks for each registry item + args combination
+        # This reduces N queries (one per block) to M queries (one per registry item + args)
+        executed_blocks_cache: dict[tuple[str, str], set[int]] = {}
+
+        logger.info(
+            "Dry run: pre-fetching executed blocks",
+            from_block=self.from_block,
+            to_block=self.to_block,
+            registry_items_count=len(registry_items),
+        )
+
+        for registry_item in registry_items:
+            for args in registry_item.get_execution_args():
+                args_json = serialize_args(args)
+                cache_key = (registry_item.executable_path, args_json)
+
+                # Fetch all executed blocks in the range with a single query
+                executed_blocks_cache[cache_key] = abd_dal.executed_block_numbers(
+                    registry_item.executable_path,
+                    args_json,
+                    self.from_block,
+                    self.to_block + 1,
+                )
+
+        logger.info(
+            "Dry run: analyzing blocks",
+            cache_entries=len(executed_blocks_cache),
+        )
+
+        # Track which blocks have at least one task
+        blocks_with_tasks: set[int] = set()
+
+        for registry_item in registry_items:
+            for args in registry_item.get_execution_args():
+                args_json = serialize_args(args)
+                cache_key = (registry_item.executable_path, args_json)
+                executed_blocks = executed_blocks_cache[cache_key]
+
+                for block_number in range(self.from_block, self.to_block + 1):
+                    if not self.is_running:
+                        break
+
+                    if block_number in executed_blocks:
+                        continue
+
+                    # Check if condition matches
+                    try:
+                        if registry_item.match_condition(block_number, **args):
+                            stats.estimated_tasks += 1
+                            blocks_with_tasks.add(block_number)
+                    except Exception as exc:
+                        logger.debug(
+                            "Error evaluating match condition during dry run",
+                            function_name=registry_item.function.__name__,
+                            block_number=block_number,
+                            args=args,
+                            error=str(exc),
+                        )
+
+        stats.blocks_needing_tasks = len(blocks_with_tasks)
+        stats.already_processed = stats.total_blocks - stats.blocks_needing_tasks
+
+        return stats
+
+    def _run_backfill(self) -> None:
+        """Run the actual backfill process."""
+        processed_count = 0
+        total_blocks = self.to_block - self.from_block + 1
+
+        # Set initial metrics
+        set_backfill_progress(self.from_block, self.to_block, self.from_block)
+
+        # Pre-fetch all executed blocks to avoid per-block DB queries
+        logger.info(
+            "Pre-fetching executed blocks",
+            from_block=self.from_block,
+            to_block=self.to_block,
+        )
+        executed_blocks_cache = self._prefetch_executed_blocks()
+        logger.info(
+            "Pre-fetch complete",
+            cache_entries=len(executed_blocks_cache),
+        )
+
+        try:
+            for block_number in range(self.from_block, self.to_block + 1):
+                if not self.is_running:
+                    logger.info("BackfillScheduler stopping early", processed_count=processed_count)
+                    break
+
+                try:
+                    with BlockProcessingTimer(mode="backfill"):
+                        self._process_block(block_number, executed_blocks_cache)
+
+                    processed_count += 1
+
+                    # Update metrics
+                    set_current_block("backfill", block_number)
+                    set_backfill_progress(self.from_block, self.to_block, block_number)
+                    increment_blocks_processed("backfill")
+
+                    # Log progress periodically
+                    if processed_count % PROGRESS_LOG_INTERVAL == 0:
+                        progress_pct = (processed_count / total_blocks) * 100
+                        logger.info(
+                            "Backfill progress",
+                            processed=processed_count,
+                            total=total_blocks,
+                            progress_percent=f"{progress_pct:.1f}%",
+                            current_block=block_number,
+                        )
+
+                    # Rate limiting between block submissions
+                    if block_number < self.to_block and self.rate_limit > 0:
+                        time.sleep(self.rate_limit)
+
+                except KeyboardInterrupt:
+                    raise
+                except Exception:
+                    logger.exception(
+                        "Error processing block during backfill",
+                        block_number=block_number,
+                    )
+                    # Continue with next block
+                    time.sleep(self.rate_limit)
+
+        except KeyboardInterrupt:
+            logger.info(
+                "BackfillScheduler interrupted",
+                processed_count=processed_count,
+                last_block=self.from_block + processed_count - 1 if processed_count > 0 else self.from_block,
+            )
+            self.stop()
+
+        logger.info(
+            "BackfillScheduler completed",
+            processed_count=processed_count,
+            total_blocks=total_blocks,
+        )
+
+    def _prefetch_executed_blocks(self) -> dict[tuple[str, str], set[int]]:
+        """Pre-fetch all executed blocks for all registry items in the range."""
+        cache: dict[tuple[str, str], set[int]] = {}
+
+        for registry_item in self.block_processor.registry.get_functions():
+            for args in registry_item.get_execution_args():
+                args_json = serialize_args(args)
+                cache_key = (registry_item.executable_path, args_json)
+
+                cache[cache_key] = abd_dal.executed_block_numbers(
+                    registry_item.executable_path,
+                    args_json,
+                    self.from_block,
+                    self.to_block + 1,
+                )
+
+        return cache
+
+    def _process_block(
+        self,
+        block_number: int,
+        executed_blocks_cache: dict[tuple[str, str], set[int]],
+    ) -> None:
+        """Process a single block during backfill."""
+        for registry_item in self.block_processor.registry.get_functions():
+            try:
+                self._process_registry_item_for_backfill(
+                    registry_item,
+                    block_number,
+                    executed_blocks_cache,
+                )
+            except Exception:
+                logger.exception(
+                    "Error processing registry item during backfill",
+                    function_name=registry_item.function.__name__,
+                    block_number=block_number,
+                )
+
+    def _requires_archive_network(self, block_number: int) -> bool:
+        """Check if a block requires archive network based on age."""
+        if self._current_head_cache is None:
+            return False
+        blocks_behind = self._current_head_cache - block_number
+        return blocks_behind > ARCHIVE_BLOCK_THRESHOLD
+
+    def _process_registry_item_for_backfill(
+        self,
+        registry_item: RegistryItem,
+        block_number: int,
+        executed_blocks_cache: dict[tuple[str, str], set[int]],
+    ) -> None:
+        """Process a registry item for backfill - only submits if not already executed."""
+        for args in registry_item.get_execution_args():
+            args_json = serialize_args(args)
+            cache_key = (registry_item.executable_path, args_json)
+
+            # Check if already executed using pre-fetched cache
+            executed_blocks = executed_blocks_cache.get(cache_key, set())
+
+            if block_number in executed_blocks:
+                continue
+
+            # Check condition and execute
+            try:
+                if registry_item.match_condition(block_number, **args):
+                    use_archive = self._requires_archive_network(block_number)
+                    logger.debug(
+                        "Backfilling block",
+                        function_name=registry_item.function.__name__,
+                        block_number=block_number,
+                        args=args,
+                        use_archive=use_archive,
+                    )
+                    self.block_processor.executor.execute(
+                        registry_item,
+                        block_number,
+                        args,
+                        use_archive=use_archive,
+                    )
+            except Exception:
+                logger.exception(
+                    "Error during backfill task execution",
+                    function_name=registry_item.function.__name__,
+                    block_number=block_number,
+                    args=args,
+                )
+
+    def stop(self) -> None:
+        """Stop the backfill scheduler."""
+        self.is_running = False
+        logger.info("BackfillScheduler stopped")
+
+
+def backfill_scheduler_factory(
+    from_block: int,
+    to_block: int,
+    network: str = "finney",
+    rate_limit: float = 1.0,
+    dry_run: bool = False,
+) -> BackfillScheduler:
+    """
+    Factory for BackfillScheduler.
+
+    Args:
+        from_block: Starting block number (inclusive).
+        to_block: Ending block number (inclusive).
+        network: Bittensor network name. Defaults to "finney".
+        rate_limit: Seconds to sleep between blocks. Defaults to 1.0.
+        dry_run: If True, preview without executing. Defaults to False.
+
+    Returns:
+        Configured BackfillScheduler instance.
+
+    """
+    return BackfillScheduler(
+        block_processor=block_processor_factory(),
+        network=network,
+        from_block=from_block,
+        to_block=to_block,
+        rate_limit=rate_limit,
+        dry_run=dry_run,
+    )
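
A hypothetical usage sketch for the new module, assuming Django is configured and task functions are already registered; the block numbers and rate limit are illustrative:

    from abstract_block_dumper._internal.services.backfill_scheduler import (
        backfill_scheduler_factory,
    )

    scheduler = backfill_scheduler_factory(
        from_block=5_000_000,
        to_block=5_000_500,
        network="finney",
        rate_limit=0.5,  # seconds between block submissions
        dry_run=True,    # preview only; start() returns DryRunStats
    )
    stats = scheduler.start()
    if stats is not None:
        print(stats.blocks_needing_tasks, stats.estimated_tasks)
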
@@ -1,3 +1,5 @@
+import time
+
 import structlog
 from django.db import transaction
 
@@ -5,7 +7,6 @@ import abstract_block_dumper._internal.dal.django_dal as abd_dal
 from abstract_block_dumper._internal.dal.memory_registry import BaseRegistry, RegistryItem, task_registry
 from abstract_block_dumper._internal.exceptions import ConditionEvaluationError
 from abstract_block_dumper._internal.services.executor import CeleryExecutor
-from abstract_block_dumper._internal.services.utils import serialize_args
 from abstract_block_dumper.models import TaskAttempt
 
 logger = structlog.get_logger(__name__)
@@ -18,16 +19,15 @@ class BlockProcessor:
         self._cleanup_phantom_tasks()
 
     def process_block(self, block_number: int) -> None:
+        """Process a single block - executes registered tasks for this block only."""
         for registry_item in self.registry.get_functions():
             try:
-                self.process_backfill(registry_item, block_number)
                 self.process_registry_item(registry_item, block_number)
             except Exception:
-                logger.error(
+                logger.exception(
                     "Error processing registry item",
                     function_name=registry_item.function.__name__,
                     block_number=block_number,
-                    exc_info=True,
                 )
 
     def process_registry_item(self, registry_item: RegistryItem, block_number: int) -> None:
@@ -43,64 +43,28 @@ class BlockProcessor:
                 )
                 # Continue with other tasks
             except Exception:
-                logger.error("Unexpected error processing task", exc_info=True)
-
-    def process_backfill(self, registry_item: RegistryItem, current_block: int) -> None:
-        if not registry_item.backfilling_lookback:
-            return None
-
-        start_block = max(0, current_block - registry_item.backfilling_lookback)
-
-        logger.info(
-            "Processing backfill",
-            function_name=registry_item.function.__name__,
-            start_block=start_block,
-            current_block=current_block,
-            lookback=registry_item.backfilling_lookback,
-        )
-
-        execution_args_list = registry_item.get_execution_args()
-
-        for args in execution_args_list:
-            args_json = serialize_args(args)
-
-            executed_blocks = abd_dal.executed_block_numbers(
-                registry_item.executable_path,
-                args_json,
-                start_block,
-                current_block,
-            )
-
-            for block_number in range(start_block, current_block):
-                if block_number in executed_blocks:
-                    continue
+                logger.exception("Unexpected error processing task")
 
-                try:
-                    if registry_item.match_condition(block_number, **args):
-                        logger.debug(
-                            "Backfilling block",
-                            function_name=registry_item.function.__name__,
-                            block_number=block_number,
-                            args=args,
-                        )
-                        self.executor.execute(registry_item, block_number, args)
-                except Exception:
-                    logger.error(
-                        "Error during backfill",
-                        function_name=registry_item.function.__name__,
-                        block_number=block_number,
-                        args=args,
-                        exc_info=True,
-                    )
-
-    def recover_failed_retries(self) -> None:
+    def recover_failed_retries(self, poll_interval: int, batch_size: int | None = None) -> None:
         """
         Recover failed tasks that are ready to be retried.
 
         This handles tasks that may have been lost due to scheduler restarts.
+
+        Args:
+            poll_interval: Seconds to sleep between processing each retry.
+            batch_size: Maximum number of retries to process. If None, process all.
+
         """
         retry_count = 0
-        for retry_attempt in abd_dal.get_ready_to_retry_attempts():
+        retry_attempts = abd_dal.get_ready_to_retry_attempts()
+
+        # Apply batch size limit if specified
+        if batch_size is not None:
+            retry_attempts = retry_attempts[:batch_size]
+
+        for retry_attempt in retry_attempts:
+            time.sleep(poll_interval)
             try:
                 # Find the registry item to get celery_kwargs
                 registry_item = self.registry.get_by_executable_path(retry_attempt.executable_path)
@@ -148,10 +112,9 @@ class BlockProcessor:
                    attempt_count=task_attempt.attempt_count,
                )
            except Exception:
-                logger.exception? 
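
A hypothetical call site for the new recover_failed_retries() signature, assuming a processor built by the package's own factory; the interval and batch size are illustrative:

    from abstract_block_dumper._internal.services.block_processor import (
        block_processor_factory,
    )

    processor = block_processor_factory()
    # Sleep 1s between retries and cap the pass at 50 attempts, so a
    # recovery sweep cannot monopolize the scheduling loop.
    processor.recover_failed_retries(poll_interval=1, batch_size=50)
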
@@ -10,7 +10,14 @@ logger = structlog.get_logger(__name__)
 
 
 class CeleryExecutor:
-    def execute(self, registry_item: RegistryItem, block_number: int, args: dict[str, Any]) -> None:
+    def execute(
+        self,
+        registry_item: RegistryItem,
+        block_number: int,
+        args: dict[str, Any],
+        *,
+        use_archive: bool = False,
+    ) -> None:
         task_attempt, created = abd_dal.task_create_or_get_pending(
             block_number=block_number,
             executable_path=registry_item.executable_path,
@@ -26,6 +33,7 @@ class CeleryExecutor:
 
         task_kwargs = {
             "block_number": block_number,
+            "_use_archive_network": use_archive,  # Runtime hint, not stored in DB
             **args,
         }
 
@@ -49,6 +57,8 @@ class CeleryExecutor:
             celery_kwargs=apply_async_kwargs,
         )
 
-        celery_task = registry_item.function.apply_async(**apply_async_kwargs)
+        # Don't store AsyncResult to avoid memory accumulation during long runs
+        # The task ID can be retrieved from the task_attempt if needed
+        registry_item.function.apply_async(**apply_async_kwargs)
 
-        logger.debug("Celery task scheduled", task_id=task_attempt.id, celery_task_id=celery_task.id)
+        logger.debug("Celery task scheduled", task_id=task_attempt.id)
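
Because use_archive is declared after a bare *, it is keyword-only and cannot be passed positionally. A minimal sketch of the resulting call shape; the wrapper function is hypothetical:

    from typing import Any

    from abstract_block_dumper._internal.dal.memory_registry import RegistryItem
    from abstract_block_dumper._internal.services.executor import CeleryExecutor

    def submit_for_archive(item: RegistryItem, block: int, args: dict[str, Any]) -> None:
        # use_archive must be spelled out at the call site
        CeleryExecutor().execute(item, block, args, use_archive=True)
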
@@ -0,0 +1,128 @@
+"""
+Optional Prometheus metrics for block dumper.
+
+Metrics are only available if prometheus_client is installed.
+Install with: pip install abstract-block-dumper[prometheus]
+"""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any, Self
+
+import structlog
+
+if TYPE_CHECKING:
+    from types import TracebackType
+
+logger = structlog.get_logger(__name__)
+
+# Conditional import - metrics only work if prometheus_client is installed
+try:
+    from prometheus_client import Counter, Gauge, Histogram
+
+    PROMETHEUS_AVAILABLE = True
+except ImportError:
+    PROMETHEUS_AVAILABLE = False
+
+
+# Define no-op placeholders when prometheus is not available
+BLOCKS_PROCESSED = None
+TASKS_SUBMITTED = None
+CURRENT_BLOCK = None
+BACKFILL_PROGRESS = None
+BACKFILL_FROM_BLOCK = None
+BACKFILL_TO_BLOCK = None
+BLOCK_PROCESSING_TIME = None
+
+if PROMETHEUS_AVAILABLE:
+    BLOCKS_PROCESSED = Counter(  # type: ignore
+        "block_dumper_blocks_processed_total",
+        "Total blocks processed",
+        ["mode"],  # 'realtime' or 'backfill'
+    )
+    TASKS_SUBMITTED = Counter(  # type: ignore
+        "block_dumper_tasks_submitted_total",
+        "Total tasks submitted to Celery",
+        ["task_name"],
+    )
+    CURRENT_BLOCK = Gauge(  # type: ignore
+        "block_dumper_current_block",
+        "Current block being processed",
+        ["mode"],
+    )
+    BACKFILL_PROGRESS = Gauge(  # type: ignore
+        "block_dumper_backfill_progress_percent",
+        "Backfill progress percentage",
+    )
+    BACKFILL_FROM_BLOCK = Gauge(  # type: ignore
+        "block_dumper_backfill_from_block",
+        "Backfill starting block number",
+    )
+    BACKFILL_TO_BLOCK = Gauge(  # type: ignore
+        "block_dumper_backfill_to_block",
+        "Backfill target block number",
+    )
+    BLOCK_PROCESSING_TIME = Histogram(  # type: ignore
+        "block_dumper_block_processing_seconds",
+        "Time spent processing each block",
+        ["mode"],
+    )
+
+
+def increment_blocks_processed(mode: str) -> None:
+    """Increment the blocks processed counter."""
+    if PROMETHEUS_AVAILABLE and BLOCKS_PROCESSED is not None:
+        BLOCKS_PROCESSED.labels(mode=mode).inc()
+
+
+def increment_tasks_submitted(task_name: str) -> None:
+    """Increment the tasks submitted counter."""
+    if PROMETHEUS_AVAILABLE and TASKS_SUBMITTED is not None:
+        TASKS_SUBMITTED.labels(task_name=task_name).inc()
+
+
+def set_current_block(mode: str, block_number: int) -> None:
+    """Set the current block being processed."""
+    if PROMETHEUS_AVAILABLE and CURRENT_BLOCK is not None:
+        CURRENT_BLOCK.labels(mode=mode).set(block_number)
+
+
+def set_backfill_progress(from_block: int, to_block: int, current_block: int) -> None:
+    """Set backfill progress metrics."""
+    if not PROMETHEUS_AVAILABLE:
+        return
+
+    if BACKFILL_FROM_BLOCK is not None:
+        BACKFILL_FROM_BLOCK.set(from_block)
+    if BACKFILL_TO_BLOCK is not None:
+        BACKFILL_TO_BLOCK.set(to_block)
+
+    if BACKFILL_PROGRESS is not None:
+        total_blocks = to_block - from_block
+        if total_blocks > 0:
+            processed = current_block - from_block
+            progress = (processed / total_blocks) * 100
+            BACKFILL_PROGRESS.set(progress)
+
+
+class BlockProcessingTimer:
+    """Context manager for timing block processing."""
+
+    def __init__(self, mode: str) -> None:
+        self.mode = mode
+        self._timer: Any = None
+
+    def __enter__(self) -> Self:
+        if PROMETHEUS_AVAILABLE and BLOCK_PROCESSING_TIME is not None:
+            self._timer = BLOCK_PROCESSING_TIME.labels(mode=self.mode).time()
+            self._timer.__enter__()  # Start the timer
+        return self
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_val: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        if self._timer is not None:
+            self._timer.__exit__(exc_type, exc_val, exc_tb)
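
A hypothetical caller's view of the helpers above; because each one checks PROMETHEUS_AVAILABLE itself, call sites need no feature detection of their own:

    from abstract_block_dumper._internal.services.metrics import (
        BlockProcessingTimer,
        increment_blocks_processed,
        set_current_block,
    )

    def process_one(block_number: int) -> None:
        with BlockProcessingTimer(mode="backfill"):  # records the histogram if available
            ...  # actual block processing goes here
        set_current_block("backfill", block_number)
        increment_blocks_processed("backfill")
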
@@ -7,22 +7,87 @@ from django.conf import settings
 import abstract_block_dumper._internal.dal.django_dal as abd_dal
 import abstract_block_dumper._internal.services.utils as abd_utils
 from abstract_block_dumper._internal.services.block_processor import BlockProcessor, block_processor_factory
+from abstract_block_dumper._internal.services.metrics import (
+    BlockProcessingTimer,
+    increment_blocks_processed,
+    set_current_block,
+)
 
 logger = structlog.get_logger(__name__)
 
+# Blocks older than this threshold from current head require archive network
+ARCHIVE_BLOCK_THRESHOLD = 300
+
 
 class TaskScheduler:
     def __init__(
         self,
         block_processor: BlockProcessor,
-        subtensor: bt.Subtensor,
+        network: str,
         poll_interval: int,
+        realtime_head_only: bool = False,
     ) -> None:
         self.block_processor = block_processor
-        self.subtensor = subtensor
+        self.network = network
         self.poll_interval = poll_interval
+        self.realtime_head_only = realtime_head_only
         self.last_processed_block = -1
         self.is_running = False
+        self._subtensor: bt.Subtensor | None = None
+        self._archive_subtensor: bt.Subtensor | None = None
+        self._current_block_cache: int | None = None
+
+    @property
+    def subtensor(self) -> bt.Subtensor:
+        """Get the regular subtensor connection, creating it if needed."""
+        if self._subtensor is None:
+            self._subtensor = abd_utils.get_bittensor_client(self.network)
+        return self._subtensor
+
+    @subtensor.setter
+    def subtensor(self, value: bt.Subtensor | None) -> None:
+        """Set or reset the subtensor connection."""
+        self._subtensor = value
+
+    @property
+    def archive_subtensor(self) -> bt.Subtensor:
+        """Get the archive subtensor connection, creating it if needed."""
+        if self._archive_subtensor is None:
+            self._archive_subtensor = abd_utils.get_bittensor_client("archive")
+        return self._archive_subtensor
+
+    @archive_subtensor.setter
+    def archive_subtensor(self, value: bt.Subtensor | None) -> None:
+        """Set or reset the archive subtensor connection."""
+        self._archive_subtensor = value
+
+    def get_subtensor_for_block(self, block_number: int) -> bt.Subtensor:
+        """
+        Get the appropriate subtensor for the given block number.
+
+        Uses archive network for blocks older than ARCHIVE_BLOCK_THRESHOLD
+        from the current head.
+        """
+        if self._current_block_cache is None:
+            self._current_block_cache = self.subtensor.get_current_block()
+
+        blocks_behind = self._current_block_cache - block_number
+
+        if blocks_behind > ARCHIVE_BLOCK_THRESHOLD:
+            logger.debug(
+                "Using archive network for old block",
+                block_number=block_number,
+                blocks_behind=blocks_behind,
+            )
+            return self.archive_subtensor
+        return self.subtensor
+
+    def refresh_connections(self) -> None:
+        """Reset all subtensor connections to force re-establishment."""
+        self._subtensor = None
+        self._archive_subtensor = None
+        self._current_block_cache = None
+        logger.info("Subtensor connections reset")
 
     def start(self) -> None:
         self.is_running = True
@@ -37,16 +102,37 @@ class TaskScheduler:
 
         while self.is_running:
             try:
-                # Process lost retries first
-                self.block_processor.recover_failed_retries()
+                if self._current_block_cache is not None:
+                    self.subtensor = self.get_subtensor_for_block(self._current_block_cache)
 
-                current_block = self.subtensor.get_current_block()
+                # Update current block cache for archive network decision
+                self._current_block_cache = self.subtensor.get_current_block()
+                current_block = self._current_block_cache
 
-                for block_number in range(self.last_processed_block + 1, current_block + 1):
-                    self.block_processor.process_block(block_number)
-                    self.last_processed_block = block_number
+                if self.realtime_head_only:
+                    # Only process the current head block, skip if already processed
+                    if current_block != self.last_processed_block:
+                        with BlockProcessingTimer(mode="realtime"):
+                            self.block_processor.process_block(current_block)
+
+                        set_current_block("realtime", current_block)
+                        increment_blocks_processed("realtime")
+                        self.last_processed_block = current_block
+
+                    time.sleep(self.poll_interval)
+                else:
+                    # Original behavior: process all blocks from last_processed to current
+                    for block_number in range(self.last_processed_block + 1, current_block + 1):
+                        with BlockProcessingTimer(mode="realtime"):
+                            self.block_processor.process_block(block_number)
+
+                        # Update metrics
+                        set_current_block("realtime", block_number)
+                        increment_blocks_processed("realtime")
+
+                        time.sleep(self.poll_interval)
+                        self.last_processed_block = block_number
 
-                time.sleep(self.poll_interval)
             except KeyboardInterrupt:
                 logger.info("TaskScheduler stopping due to KeyboardInterrupt.")
                 self.stop()
@@ -86,9 +172,17 @@ class TaskScheduler:
         )
 
 
-def task_scheduler_factory() -> TaskScheduler:
+def task_scheduler_factory(network: str = "finney") -> TaskScheduler:
+    """
+    Factory for TaskScheduler.
+
+    Args:
+        network (str): Bittensor network name. Defaults to "finney".
+
+    """
     return TaskScheduler(
         block_processor=block_processor_factory(),
-        subtensor=abd_utils.get_bittensor_client(),
+        network=network,
        poll_interval=getattr(settings, "BLOCK_DUMPER_POLL_INTERVAL", 1),
+        realtime_head_only=getattr(settings, "BLOCK_DUMPER_REALTIME_HEAD_ONLY", True),
     )
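
A hypothetical configuration sketch for the factory above; the setting names are the ones it reads via getattr(), and the values are illustrative:

    # settings.py
    BLOCK_DUMPER_POLL_INTERVAL = 1          # seconds between polls
    BLOCK_DUMPER_REALTIME_HEAD_ONLY = True  # process only the head block each poll

    # startup code
    from abstract_block_dumper._internal.services.scheduler import task_scheduler_factory

    scheduler = task_scheduler_factory(network="finney")
    scheduler.start()  # runs until stop() or KeyboardInterrupt
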
@@ -1,6 +1,5 @@
 import json
 from collections.abc import Callable
-from functools import cache
 
 import bittensor as bt
 import structlog
@@ -10,16 +9,13 @@ from django.conf import settings
 logger = structlog.get_logger(__name__)
 
 
-@cache
-def get_bittensor_client() -> bt.Subtensor:
+def get_bittensor_client(network: str = "finney") -> bt.Subtensor:
     """
     Get a cached bittensor client.
 
     The client is cached indefinitely since network configuration
     doesn't change during runtime.
     """
-    DEFAULT_BITTENSOR_NETWORK = "finney"
-    network = getattr(settings, "BITTENSOR_NETWORK", DEFAULT_BITTENSOR_NETWORK)
     logger.info("Creating new bittensor client for network", network=network)
     return bt.subtensor(network=network)
 
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
 commit_id: COMMIT_ID
 __commit_id__: COMMIT_ID
 
-__version__ = version = '0.0.6'
-__version_tuple__ = version_tuple = (0, 0, 6)
+__version__ = version = '0.0.8'
+__version_tuple__ = version_tuple = (0, 0, 8)
 
 __commit_id__ = commit_id = None
@@ -0,0 +1,162 @@
+"""Management command for backfilling historical blocks."""
+
+from django.core.management.base import BaseCommand
+
+from abstract_block_dumper._internal.dal.memory_registry import task_registry
+from abstract_block_dumper._internal.discovery import ensure_modules_loaded
+from abstract_block_dumper._internal.services.backfill_scheduler import (
+    ARCHIVE_BLOCK_THRESHOLD,
+    BackfillScheduler,
+    backfill_scheduler_factory,
+)
+
+
+class Command(BaseCommand):
+    help = "Backfill historical blocks with rate limiting."
+
+    def add_arguments(self, parser) -> None:
+        parser.add_argument(
+            "--from-block",
+            type=int,
+            required=True,
+            help="Starting block number (inclusive)",
+        )
+        parser.add_argument(
+            "--to-block",
+            type=int,
+            required=True,
+            help="Ending block number (inclusive)",
+        )
+        parser.add_argument(
+            "--rate-limit",
+            type=float,
+            default=1.0,
+            help="Seconds to sleep between processing each block (default: 1.0)",
+        )
+        parser.add_argument(
+            "--network",
+            type=str,
+            default="finney",
+            help="Bittensor network name (default: finney)",
+        )
+        parser.add_argument(
+            "--dry-run",
+            action="store_true",
+            help="Preview blocks to backfill without executing tasks",
+        )
+
+    def handle(self, *args, **options) -> None:
+        from_block = options["from_block"]
+        to_block = options["to_block"]
+        rate_limit = options["rate_limit"]
+        network = options["network"]
+        dry_run = options["dry_run"]
+
+        # Validate arguments
+        if from_block > to_block:
+            self.stderr.write(self.style.ERROR(f"--from-block ({from_block}) must be <= --to-block ({to_block})"))
+            return
+
+        if rate_limit < 0:
+            self.stderr.write(self.style.ERROR("--rate-limit must be >= 0"))
+            return
+
+        # Load registered functions
+        self.stdout.write("Syncing decorated functions...")
+        ensure_modules_loaded()
+        functions_counter = len(task_registry.get_functions())
+        self.stdout.write(self.style.SUCCESS(f"Synced {functions_counter} functions"))
+
+        if functions_counter == 0:
+            self.stderr.write(self.style.WARNING("No functions registered. Nothing to backfill."))
+            return
+
+        # Create scheduler
+        scheduler = backfill_scheduler_factory(
+            from_block=from_block,
+            to_block=to_block,
+            network=network,
+            rate_limit=rate_limit,
+            dry_run=dry_run,
+        )
+
+        total_blocks = to_block - from_block + 1
+
+        if dry_run:
+            self._handle_dry_run(scheduler, from_block, to_block, total_blocks, rate_limit)
+        else:
+            self._handle_backfill(scheduler, from_block, to_block, total_blocks, rate_limit)
+
+    def _handle_dry_run(
+        self, scheduler: BackfillScheduler, from_block: int, to_block: int, total_blocks: int, rate_limit: float
+    ) -> None:
+        """Handle dry-run mode output."""
+        self.stdout.write("")
+        self.stdout.write(self.style.WARNING("Dry-run mode: previewing blocks to backfill (no tasks will be executed)"))
+        self.stdout.write("")
+
+        # Get network type
+        scheduler._current_head_cache = scheduler.subtensor.get_current_block()
+        network_type = scheduler._get_network_type_for_block(from_block)
+
+        self.stdout.write(f"Block range: {from_block} -> {to_block} ({total_blocks} blocks)")
+        operator = ">" if network_type == "archive" else "<="
+        self.stdout.write(f"Network: {network_type} (blocks {operator}{ARCHIVE_BLOCK_THRESHOLD} behind head)")
+        self.stdout.write(f"Current head: {scheduler._current_head_cache}")
+        self.stdout.write("")
+
+        # Show registry items
+        self.stdout.write("Registry items:")
+        for registry_item in scheduler.block_processor.registry.get_functions():
+            self.stdout.write(f"  - {registry_item.executable_path}")
+        self.stdout.write("")
+
+        # Run dry-run
+        self.stdout.write("Analyzing blocks...")
+        stats = scheduler.start()
+
+        if stats is None:
+            self.stderr.write(self.style.ERROR("Dry-run failed"))
+            return
+
+        # Output summary
+        self.stdout.write("")
+        self.stdout.write(self.style.SUCCESS("Summary:"))
+        self.stdout.write(f"  Total blocks in range: {stats.total_blocks}")
+        self.stdout.write(f"  Already processed (all tasks done): {stats.already_processed}")
+        self.stdout.write(f"  Blocks needing tasks: {stats.blocks_needing_tasks}")
+        self.stdout.write(f"  Estimated tasks to submit: {stats.estimated_tasks}")
+
+        if rate_limit > 0 and stats.blocks_needing_tasks > 0:
+            estimated_seconds = stats.blocks_needing_tasks * rate_limit
+            if estimated_seconds < 60:
+                time_str = f"~{estimated_seconds:.0f} seconds"
+            elif estimated_seconds < 3600:
+                time_str = f"~{estimated_seconds / 60:.1f} minutes"
+            else:
+                time_str = f"~{estimated_seconds / 3600:.1f} hours"
+            self.stdout.write(f"  Estimated time at {rate_limit}s rate limit: {time_str}")
+
+    def _handle_backfill(self, scheduler, from_block: int, to_block: int, total_blocks: int, rate_limit: float) -> None:
+        """Handle actual backfill execution."""
+        self.stdout.write("")
+        self.stdout.write(f"Starting backfill: {from_block} -> {to_block} ({total_blocks} blocks)")
+        self.stdout.write(f"Rate limit: {rate_limit} seconds between blocks")
+
+        if rate_limit > 0:
+            estimated_seconds = total_blocks * rate_limit
+            if estimated_seconds < 60:
+                time_str = f"~{estimated_seconds:.0f} seconds"
+            elif estimated_seconds < 3600:
+                time_str = f"~{estimated_seconds / 60:.1f} minutes"
+            else:
+                time_str = f"~{estimated_seconds / 3600:.1f} hours"
+            self.stdout.write(f"Estimated max time: {time_str}")
+
+        self.stdout.write("")
+        self.stdout.write("Press Ctrl+C to stop gracefully...")
+        self.stdout.write("")
+
+        scheduler.start()
+
+        self.stdout.write(self.style.SUCCESS("Backfill completed"))
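
A hypothetical invocation of the new command via Django's call_command (argparse maps --from-block to the from_block option); the equivalent shell form is `python manage.py backfill_blocks_v1 --from-block ... --to-block ... --dry-run`. The block numbers are illustrative:

    from django.core.management import call_command

    call_command(
        "backfill_blocks_v1",
        from_block=5_000_000,
        to_block=5_000_500,
        rate_limit=0.5,
        dry_run=True,
    )
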
@@ -1,3 +1,4 @@
+import inspect
 from collections.abc import Callable
 from typing import Any, cast
 
@@ -67,12 +68,18 @@ def _celery_task_wrapper(
 ) -> dict[str, Any] | None:
     executable_path = abd_utils.get_executable_path(func)
 
+    # Extract runtime hints that shouldn't be stored in DB
+    use_archive_network = kwargs.pop("_use_archive_network", False)
+
+    # Create db_kwargs without runtime hints for DB lookup
+    db_kwargs = {k: v for k, v in kwargs.items() if not k.startswith("_")}
+
     with transaction.atomic():
         try:
             task_attempt = TaskAttempt.objects.select_for_update(nowait=True).get(
                 block_number=block_number,
                 executable_path=executable_path,
-                args_json=abd_utils.serialize_args(kwargs),
+                args_json=abd_utils.serialize_args(db_kwargs),
             )
         except TaskAttempt.DoesNotExist as exc:
             msg = "TaskAttempt not found - task may have been canceled directly"
@@ -96,6 +103,8 @@ def _celery_task_wrapper(
 
     # Start task execution
    try:
+        # Pass _use_archive_network only if the function accepts **kwargs
+        # Otherwise, strip it to avoid TypeError
        execution_kwargs = {"block_number": block_number, **kwargs}
        logger.info(
            "Starting task execution",
@@ -103,9 +112,15 @@ def _celery_task_wrapper(
            block_number=block_number,
            executable_path=executable_path,
            celery_task_id=task_attempt.celery_task_id,
-            execution_kwargs=execution_kwargs,
+            use_archive_network=use_archive_network,
        )
 
+        # Check if function accepts **kwargs before adding _use_archive_network
+        sig = inspect.signature(func)
+        has_var_keyword = any(p.kind == inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values())
+        if has_var_keyword:
+            execution_kwargs["_use_archive_network"] = use_archive_network
+
        result = func(**execution_kwargs)
 
        abd_dal.task_mark_as_success(task_attempt, result)
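
A minimal standalone sketch of the signature probe above: a task that declares **kwargs receives the runtime hint, while one with a fixed signature does not, which avoids a TypeError. The two task functions are hypothetical:

    import inspect

    def fixed_task(block_number: int) -> None: ...

    def flexible_task(block_number: int, **kwargs) -> None: ...

    def accepts_var_keyword(func) -> bool:
        return any(
            p.kind == inspect.Parameter.VAR_KEYWORD
            for p in inspect.signature(func).parameters.values()
        )

    assert not accepts_var_keyword(fixed_task)
    assert accepts_var_keyword(flexible_task)
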
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: abstract-block-dumper
-Version: 0.0.6
+Version: 0.0.8
 Project-URL: Source, https://github.com/bactensor/abstract-block-dumper
 Project-URL: Issue Tracker, https://github.com/bactensor/abstract-block-dumper/issues
 Author-email: Reef Technologies <opensource@reef.pl>
@@ -18,6 +18,8 @@ Requires-Dist: bittensor>=9.10.1
 Requires-Dist: celery>=5.3
 Requires-Dist: django<6.0,>=3.2
 Requires-Dist: structlog>=25.4.0
+Provides-Extra: prometheus
+Requires-Dist: prometheus-client>=0.17.0; extra == 'prometheus'
 Description-Content-Type: text/markdown
 
 # Abstract Block Dumper
@@ -1,5 +1,5 @@
 abstract_block_dumper/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-abstract_block_dumper/_version.py,sha256=7MyqQ3iPP2mJruPfRYGCNCq1z7_Nk7c-eyYecYITxsY,704
+abstract_block_dumper/_version.py,sha256=j-ar4gJGiWIawqKXhvv9hGJWLfwu0tISl-0GV97B7a0,704
 abstract_block_dumper/admin.py,sha256=3J3I_QOKFgfMNpTXW-rTQGO_q5Ls6uNuL0FkPVdIsYg,1654
 abstract_block_dumper/apps.py,sha256=DXATdrjsL3T2IletTbKeD6unr8ScLaxg7wz0nAHTAns,215
 abstract_block_dumper/models.py,sha256=MO9824dmHB6xF3PrFE_RERh7whVjQtS4tt6QA0wSbg0,2022
@@ -8,22 +8,25 @@ abstract_block_dumper/_internal/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm
 abstract_block_dumper/_internal/discovery.py,sha256=sISOL8vq6rC0pOndrCfWKDZjyYwzzZIChG-BH9mteq0,745
 abstract_block_dumper/_internal/exceptions.py,sha256=jVXQ8b3gneno2XYvO0XisJPMlkAWb6H5u10egIpPJ4k,335
 abstract_block_dumper/_internal/dal/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-abstract_block_dumper/_internal/dal/django_dal.py,sha256=pBGEFeo_U0ac2Za-dwzJvf04Ng8lP51aR60c_DUrGIw,5426
-abstract_block_dumper/_internal/dal/memory_registry.py,sha256=yMNF7jrvWGF-S1pqyR2zOCNLWwrdsImcvV6cGqu1wYE,2972
+abstract_block_dumper/_internal/dal/django_dal.py,sha256=i9jocanfptjXw5lfE2xBYvx5mo1g98IoMjlS-WjGP88,5623
+abstract_block_dumper/_internal/dal/memory_registry.py,sha256=m9Yms-cuemi9_5q_Kn_zsJnxDPEiuAUkESIAltD60QY,2943
 abstract_block_dumper/_internal/services/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-abstract_block_dumper/_internal/services/block_processor.py,sha256=wB-zeft3Ys8zmqCdF_v12rXd6umNWvGfy2Ts6XSGkL8,8132
-abstract_block_dumper/_internal/services/executor.py,sha256=ZZmQ9TzoNEoAE4amiU8lHRsTfP7YusUkWXasrArfo2g,1806
-abstract_block_dumper/_internal/services/scheduler.py,sha256=NrT3t0oVR-osf50tWWqcxojkVkxhd2PHsk0PuXD5RMc,3593
-abstract_block_dumper/_internal/services/utils.py,sha256=Y8b8KdKn53mcuWchw6b5EJq9ipO4p1FFf6g_Fpbg7cQ,1273
+abstract_block_dumper/_internal/services/backfill_scheduler.py,sha256=PTBs3tN4LLvFYuXKVsHVpme66RAsnPt166RWlRNA4xQ,15955
+abstract_block_dumper/_internal/services/block_processor.py,sha256=NC7p1oD38FpaZb6EbykBolP32uY069abumOvXrjOBV0,6644
+abstract_block_dumper/_internal/services/executor.py,sha256=vuAALKuP7KTQuQyG-P8JfrU22Sr-90HzXsIdfYbXZy4,2080
+abstract_block_dumper/_internal/services/metrics.py,sha256=FLhpq63WVccc0N6K1rN2VnV90jywFfAiG2ZxoDPALv0,3929
+abstract_block_dumper/_internal/services/scheduler.py,sha256=1Ls9dn_siEKl6yXJTS_B4NKhA3ZpZxrMJM_whRtEdxk,7453
+abstract_block_dumper/_internal/services/utils.py,sha256=QSs2hBHWOPgNgKPf_ZmADXuqEiqK5mWZp7JblvQgxZQ,1140
 abstract_block_dumper/management/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 abstract_block_dumper/management/commands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+abstract_block_dumper/management/commands/backfill_blocks_v1.py,sha256=EmNUozAZn8uThjCvusZe7poNrw9RYy-MafMg2wu3XeQ,6392
 abstract_block_dumper/management/commands/block_tasks_v1.py,sha256=jSi04ahIKYwlm_dNKCUGL_cmALv1iP-ZjfXrmz0pn-4,880
 abstract_block_dumper/migrations/0001_initial.py,sha256=ImPHC3G6kPkq4Xn_4YVAm4Labh1Xi7PkCRszYRGpTiI,2298
 abstract_block_dumper/migrations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 abstract_block_dumper/v1/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 abstract_block_dumper/v1/celery.py,sha256=X4IqVs5i6ZpyY7fy1SqMZgsZy4SXP-jK2qG-FYnjU38,1722
-abstract_block_dumper/v1/decorators.py,sha256=SBl8XP9qhKyTdsKaRREW870BZGidEe0C_nmxnwh76lo,8156
+abstract_block_dumper/v1/decorators.py,sha256=Lua91tR-d0juif9VGVxnC8bzhcgyn8_1bSR33hw7IK0,8920
 abstract_block_dumper/v1/tasks.py,sha256=u9iMYdDUqzYT3yPrNwZecHnlweZ3yFipV9BcIWHCbus,2647
-abstract_block_dumper-0.0.6.dist-info/METADATA,sha256=yXyU72VareEjcSb8A4Tur5hWkgk5k6lBLy9cnqR--kY,12902
-abstract_block_dumper-0.0.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-abstract_block_dumper-0.0.6.dist-info/RECORD,,
+abstract_block_dumper-0.0.8.dist-info/METADATA,sha256=O5E4ChDm8gX0XSs_zvImjBgl9Q_DJkNu_rW5KSx-fMk,12993
+abstract_block_dumper-0.0.8.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+abstract_block_dumper-0.0.8.dist-info/RECORD,,
@@ -1,4 +1,4 @@
 Wheel-Version: 1.0
-Generator: hatchling 1.27.0
+Generator: hatchling 1.28.0
 Root-Is-Purelib: true
 Tag: py3-none-any