abstract-block-dumper 0.0.8__py3-none-any.whl → 0.1.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
--- a/abstract_block_dumper/_internal/services/backfill_scheduler.py
+++ b/abstract_block_dumper/_internal/services/backfill_scheduler.py
@@ -18,8 +18,10 @@ import abstract_block_dumper._internal.services.utils as abd_utils
 from abstract_block_dumper._internal.services.block_processor import BlockProcessor, block_processor_factory
 from abstract_block_dumper._internal.services.metrics import (
     BlockProcessingTimer,
+    increment_archive_network_usage,
     increment_blocks_processed,
     set_backfill_progress,
+    set_block_lag,
     set_current_block,
 )
 from abstract_block_dumper._internal.services.utils import serialize_args
@@ -275,6 +277,10 @@ class BackfillScheduler:
             set_backfill_progress(self.from_block, self.to_block, block_number)
             increment_blocks_processed("backfill")

+            # Track block lag (distance from chain head)
+            if self._current_head_cache:
+                set_block_lag("backfill", self._current_head_cache - block_number)
+
             # Log progress periodically
             if processed_count % PROGRESS_LOG_INTERVAL == 0:
                 progress_pct = (processed_count / total_blocks) * 100
@@ -380,6 +386,8 @@ class BackfillScheduler:
         try:
             if registry_item.match_condition(block_number, **args):
                 use_archive = self._requires_archive_network(block_number)
+                if use_archive:
+                    increment_archive_network_usage()
                 logger.debug(
                     "Backfilling block",
                     function_name=registry_item.function.__name__,
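
One reading note on the lag tracking above: the guard "if self._current_head_cache:" is falsy for both None and 0, so lag is only recorded once a chain head has actually been cached. A minimal standalone sketch of the intended computation, with illustrative values and an explicit None check (not taken from the package):

current_head: int | None = 1_000_000  # hypothetical cached chain head
block_number = 999_900                # block currently being backfilled

if current_head is not None:
    lag = current_head - block_number  # 100 blocks behind the head
    print(f"backfill lag: {lag}")      # this is the value fed to set_block_lag()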
--- a/abstract_block_dumper/_internal/services/executor.py
+++ b/abstract_block_dumper/_internal/services/executor.py
@@ -4,6 +4,7 @@ import structlog

 import abstract_block_dumper._internal.dal.django_dal as abd_dal
 from abstract_block_dumper._internal.dal.memory_registry import RegistryItem
+from abstract_block_dumper._internal.services.metrics import increment_tasks_submitted
 from abstract_block_dumper.models import TaskAttempt

 logger = structlog.get_logger(__name__)
@@ -61,4 +62,8 @@ class CeleryExecutor:
         # The task ID can be retrieved from the task_attempt if needed
         registry_item.function.apply_async(**apply_async_kwargs)

+        # Track task submission metric
+        task_name = registry_item.executable_path.split(".")[-1]
+        increment_tasks_submitted(task_name)
+
         logger.debug("Celery task scheduled", task_id=task_attempt.id)
--- a/abstract_block_dumper/_internal/services/metrics.py
+++ b/abstract_block_dumper/_internal/services/metrics.py
@@ -33,6 +33,15 @@ BACKFILL_PROGRESS = None
 BACKFILL_FROM_BLOCK = None
 BACKFILL_TO_BLOCK = None
 BLOCK_PROCESSING_TIME = None
+# Task-level metrics
+TASK_EXECUTIONS = None
+TASK_EXECUTION_TIME = None
+TASK_RETRIES = None
+# Business/observability metrics
+BLOCK_LAG = None  # How far behind the chain head
+PENDING_TASKS = None  # Current pending tasks count
+REGISTERED_TASKS = None  # Number of registered task types
+ARCHIVE_NETWORK_USAGE = None  # Counter for archive network fallback

 if PROMETHEUS_AVAILABLE:
     BLOCKS_PROCESSED = Counter(  # type: ignore
@@ -67,6 +76,41 @@ if PROMETHEUS_AVAILABLE:
         "Time spent processing each block",
         ["mode"],
     )
+    # Task-level metrics
+    TASK_EXECUTIONS = Counter(  # type: ignore
+        "block_dumper_task_executions_total",
+        "Total task executions by status",
+        ["task_name", "status"],  # status: 'success', 'failed'
+    )
+    TASK_EXECUTION_TIME = Histogram(  # type: ignore
+        "block_dumper_task_execution_seconds",
+        "Time spent executing each task",
+        ["task_name"],
+        buckets=(0.01, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0, 30.0, 60.0, 120.0),
+    )
+    TASK_RETRIES = Counter(  # type: ignore
+        "block_dumper_task_retries_total",
+        "Total task retry attempts",
+        ["task_name"],
+    )
+    # Business/observability metrics
+    BLOCK_LAG = Gauge(  # type: ignore
+        "block_dumper_block_lag",
+        "Number of blocks behind the chain head",
+        ["mode"],  # 'realtime' or 'backfill'
+    )
+    PENDING_TASKS = Gauge(  # type: ignore
+        "block_dumper_pending_tasks",
+        "Current number of pending tasks in queue",
+    )
+    REGISTERED_TASKS = Gauge(  # type: ignore
+        "block_dumper_registered_tasks",
+        "Number of registered task types",
+    )
+    ARCHIVE_NETWORK_USAGE = Counter(  # type: ignore
+        "block_dumper_archive_network_requests_total",
+        "Total requests using archive network",
+    )


 def increment_blocks_processed(mode: str) -> None:
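
All of these metric objects default to None and are only constructed when the metrics library imports successfully, so every helper can degrade to a no-op. A minimal sketch of that optional-dependency pattern, assuming prometheus_client is the underlying library (the Counter / Gauge / Histogram constructors match its API; the names here are illustrative, not from the package):

try:
    from prometheus_client import Counter
    PROMETHEUS_AVAILABLE = True
except ImportError:
    PROMETHEUS_AVAILABLE = False

EXAMPLE_COUNTER = None
if PROMETHEUS_AVAILABLE:
    EXAMPLE_COUNTER = Counter("example_events_total", "Total example events")

def increment_example() -> None:
    # No-op when the library (and therefore the metric object) is absent.
    if PROMETHEUS_AVAILABLE and EXAMPLE_COUNTER is not None:
        EXAMPLE_COUNTER.inc()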
@@ -105,6 +149,71 @@ def set_backfill_progress(from_block: int, to_block: int, current_block: int) ->
         BACKFILL_PROGRESS.set(progress)


+def set_block_lag(mode: str, lag: int) -> None:
+    """Set the current block lag (distance from chain head)."""
+    if PROMETHEUS_AVAILABLE and BLOCK_LAG is not None:
+        BLOCK_LAG.labels(mode=mode).set(lag)
+
+
+def set_pending_tasks(count: int) -> None:
+    """Set the current number of pending tasks."""
+    if PROMETHEUS_AVAILABLE and PENDING_TASKS is not None:
+        PENDING_TASKS.set(count)
+
+
+def set_registered_tasks(count: int) -> None:
+    """Set the number of registered task types."""
+    if PROMETHEUS_AVAILABLE and REGISTERED_TASKS is not None:
+        REGISTERED_TASKS.set(count)
+
+
+def increment_archive_network_usage() -> None:
+    """Increment the archive network usage counter."""
+    if PROMETHEUS_AVAILABLE and ARCHIVE_NETWORK_USAGE is not None:
+        ARCHIVE_NETWORK_USAGE.inc()
+
+
+def record_task_execution(task_name: str, status: str) -> None:
+    """Record a task execution with status (success/failed)."""
+    if PROMETHEUS_AVAILABLE and TASK_EXECUTIONS is not None:
+        TASK_EXECUTIONS.labels(task_name=task_name, status=status).inc()
+
+
+def record_task_retry(task_name: str) -> None:
+    """Record a task retry attempt."""
+    if PROMETHEUS_AVAILABLE and TASK_RETRIES is not None:
+        TASK_RETRIES.labels(task_name=task_name).inc()
+
+
+def observe_task_execution_time(task_name: str, duration: float) -> None:
+    """Record task execution duration in seconds."""
+    if PROMETHEUS_AVAILABLE and TASK_EXECUTION_TIME is not None:
+        TASK_EXECUTION_TIME.labels(task_name=task_name).observe(duration)
+
+
+class TaskExecutionTimer:
+    """Context manager for timing task execution."""
+
+    def __init__(self, task_name: str) -> None:
+        self.task_name = task_name
+        self._timer: Any = None
+
+    def __enter__(self) -> Self:
+        if PROMETHEUS_AVAILABLE and TASK_EXECUTION_TIME is not None:
+            self._timer = TASK_EXECUTION_TIME.labels(task_name=self.task_name).time()
+            self._timer.__enter__()
+        return self
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_val: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        if self._timer is not None:
+            self._timer.__exit__(exc_type, exc_val, exc_tb)
+
+
 class BlockProcessingTimer:
     """Context manager for timing block processing."""

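A hedged usage sketch of the new helpers (not taken from the package): TaskExecutionTimer times a block of work, and record_task_execution tags the outcome. The import path below is the one the diff itself uses elsewhere:

import time

from abstract_block_dumper._internal.services.metrics import (
    TaskExecutionTimer,
    record_task_execution,
)

def run_example_task() -> None:
    with TaskExecutionTimer("example_task"):
        time.sleep(0.01)  # stand-in for real work

try:
    run_example_task()
    record_task_execution("example_task", "success")
except Exception:
    record_task_execution("example_task", "failed")
    raise

Because every helper no-ops when Prometheus is unavailable, this pattern is safe to leave in place in environments without the metrics dependency.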
--- a/abstract_block_dumper/_internal/services/scheduler.py
+++ b/abstract_block_dumper/_internal/services/scheduler.py
@@ -10,7 +10,9 @@ from abstract_block_dumper._internal.services.block_processor import BlockProces
 from abstract_block_dumper._internal.services.metrics import (
     BlockProcessingTimer,
     increment_blocks_processed,
+    set_block_lag,
     set_current_block,
+    set_registered_tasks,
 )

 logger = structlog.get_logger(__name__)
@@ -25,12 +27,10 @@ class TaskScheduler:
         block_processor: BlockProcessor,
         network: str,
         poll_interval: int,
-        realtime_head_only: bool = False,
     ) -> None:
         self.block_processor = block_processor
         self.network = network
         self.poll_interval = poll_interval
-        self.realtime_head_only = realtime_head_only
         self.last_processed_block = -1
         self.is_running = False
         self._subtensor: bt.Subtensor | None = None
@@ -94,10 +94,13 @@ class TaskScheduler:

         self.initialize_last_block()

+        registered_tasks_count = len(self.block_processor.registry.get_functions())
+        set_registered_tasks(registered_tasks_count)
+
         logger.info(
             "TaskScheduler started",
             last_processed_block=self.last_processed_block,
-            registry_functions=len(self.block_processor.registry.get_functions()),
+            registry_functions=registered_tasks_count,
         )

         while self.is_running:
@@ -109,29 +112,17 @@
                 self._current_block_cache = self.subtensor.get_current_block()
                 current_block = self._current_block_cache

-                if self.realtime_head_only:
-                    # Only process the current head block, skip if already processed
-                    if current_block != self.last_processed_block:
-                        with BlockProcessingTimer(mode="realtime"):
-                            self.block_processor.process_block(current_block)
-
-                        set_current_block("realtime", current_block)
-                        increment_blocks_processed("realtime")
-                        self.last_processed_block = current_block
+                # Only process the current head block, skip if already processed
+                if current_block != self.last_processed_block:
+                    with BlockProcessingTimer(mode="realtime"):
+                        self.block_processor.process_block(current_block)

-                    time.sleep(self.poll_interval)
-                else:
-                    # Original behavior: process all blocks from last_processed to current
-                    for block_number in range(self.last_processed_block + 1, current_block + 1):
-                        with BlockProcessingTimer(mode="realtime"):
-                            self.block_processor.process_block(block_number)
+                    set_current_block("realtime", current_block)
+                    increment_blocks_processed("realtime")
+                    set_block_lag("realtime", 0)  # Head-only mode has no lag
+                    self.last_processed_block = current_block

-                        # Update metrics
-                        set_current_block("realtime", block_number)
-                        increment_blocks_processed("realtime")
-
-                        time.sleep(self.poll_interval)
-                        self.last_processed_block = block_number
+                time.sleep(self.poll_interval)

             except KeyboardInterrupt:
                 logger.info("TaskScheduler stopping due to KeyboardInterrupt.")
@@ -184,5 +175,4 @@ def task_scheduler_factory(network: str = "finney") -> TaskScheduler:
         block_processor=block_processor_factory(),
         network=network,
         poll_interval=getattr(settings, "BLOCK_DUMPER_POLL_INTERVAL", 1),
-        realtime_head_only=getattr(settings, "BLOCK_DUMPER_REALTIME_HEAD_ONLY", True),
     )
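
Taken together, these scheduler hunks remove the realtime_head_only switch and its BLOCK_DUMPER_REALTIME_HEAD_ONLY setting: head-only polling is now the only realtime behavior, block lag is reported as 0 by definition, and replaying missed ranges is left to BackfillScheduler. A simplified sketch of the resulting loop shape (get_head, process, and stop are stand-ins, not package APIs):

import time

def poll_head_only(get_head, process, poll_interval: float, stop) -> None:
    last_processed = -1
    while not stop():
        head = get_head()
        if head != last_processed:  # skip a head we have already handled
            process(head)           # note: intermediate blocks are not replayed
            last_processed = head
        time.sleep(poll_interval)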
--- a/abstract_block_dumper/_internal/services/utils.py
+++ b/abstract_block_dumper/_internal/services/utils.py
@@ -17,7 +17,7 @@ def get_bittensor_client(network: str = "finney") -> bt.Subtensor:
     doesn't change during runtime.
     """
     logger.info("Creating new bittensor client for network", network=network)
-    return bt.subtensor(network=network)
+    return bt.Subtensor(network=network)


 def get_current_celery_task_id() -> str:
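
This one-character fix swaps the lowercase bt.subtensor entry point for the bt.Subtensor class, presumably matching the class name exposed by the bittensor version this package targets. The docstring fragment "doesn't change during runtime" suggests the network, and hence the client, is treated as fixed for the life of the process; one common way to get that behavior (an assumption for illustration, not necessarily how utils.py implements it) is an lru_cache keyed on the network name:

import functools

import bittensor as bt  # assumes the bittensor SDK is installed

@functools.lru_cache(maxsize=None)
def get_cached_client(network: str = "finney") -> bt.Subtensor:
    # One client per network name for the lifetime of the process.
    return bt.Subtensor(network=network)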
--- a/abstract_block_dumper/_version.py
+++ b/abstract_block_dumper/_version.py
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
     commit_id: COMMIT_ID
     __commit_id__: COMMIT_ID

-__version__ = version = '0.0.8'
-__version_tuple__ = version_tuple = (0, 0, 8)
+__version__ = version = '0.1.0'
+__version_tuple__ = version_tuple = (0, 1, 0)

 __commit_id__ = commit_id = None
--- a/abstract_block_dumper/v1/decorators.py
+++ b/abstract_block_dumper/v1/decorators.py
@@ -1,4 +1,5 @@
 import inspect
+import time
 from collections.abc import Callable
 from typing import Any, cast

@@ -10,6 +11,11 @@ import abstract_block_dumper._internal.dal.django_dal as abd_dal
 import abstract_block_dumper._internal.services.utils as abd_utils
 from abstract_block_dumper._internal.dal.memory_registry import RegistryItem, task_registry
 from abstract_block_dumper._internal.exceptions import CeleryTaskLockedError
+from abstract_block_dumper._internal.services.metrics import (
+    observe_task_execution_time,
+    record_task_execution,
+    record_task_retry,
+)
 from abstract_block_dumper.models import TaskAttempt

 logger = structlog.get_logger(__name__)
@@ -62,9 +68,15 @@ def schedule_retry(task_attempt: TaskAttempt) -> None:
         eta=task_attempt.next_retry_at,
     )

+    # Record retry metric
+    task_name = task_attempt.executable_path.split(".")[-1]
+    record_task_retry(task_name)
+

 def _celery_task_wrapper(
-    func: Callable[..., Any], block_number: int, **kwargs: dict[str, Any]
+    func: Callable[..., Any],
+    block_number: int,
+    **kwargs: dict[str, Any],
 ) -> dict[str, Any] | None:
     executable_path = abd_utils.get_executable_path(func)

@@ -102,10 +114,20 @@ def _celery_task_wrapper(
     abd_dal.task_mark_as_started(task_attempt, abd_utils.get_current_celery_task_id())

     # Start task execution
+    # Pass _use_archive_network only if the function accepts **kwargs
+    # Otherwise, strip it to avoid TypeError
+    execution_kwargs = {"block_number": block_number, **kwargs}
+
+    # Check if function accepts **kwargs before adding _use_archive_network
+    sig = inspect.signature(func)
+    has_var_keyword = any(p.kind == inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values())
+    if has_var_keyword:
+        execution_kwargs["_use_archive_network"] = use_archive_network
+
+    task_name = executable_path.split(".")[-1]  # Get short task name
+    start_time = time.perf_counter()
+
     try:
-        # Pass _use_archive_network only if the function accepts **kwargs
-        # Otherwise, strip it to avoid TypeError
-        execution_kwargs = {"block_number": block_number, **kwargs}
         logger.info(
             "Starting task execution",
             task_id=task_attempt.id,
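
The **kwargs detection, moved out of the try block here so it runs before timing starts, relies on inspect.Parameter.VAR_KEYWORD. A standalone demonstration of the same check:

import inspect

def accepts_kwargs(block_number: int, **kwargs: object) -> None: ...

def positional_only(block_number: int) -> None: ...

def has_var_keyword(func) -> bool:
    sig = inspect.signature(func)
    return any(p.kind == inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values())

assert has_var_keyword(accepts_kwargs) is True
assert has_var_keyword(positional_only) is False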
@@ -115,27 +137,32 @@ def _celery_task_wrapper(
             use_archive_network=use_archive_network,
         )

-        # Check if function accepts **kwargs before adding _use_archive_network
-        sig = inspect.signature(func)
-        has_var_keyword = any(p.kind == inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values())
-        if has_var_keyword:
-            execution_kwargs["_use_archive_network"] = use_archive_network
-
         result = func(**execution_kwargs)
+        execution_duration = time.perf_counter() - start_time

         abd_dal.task_mark_as_success(task_attempt, result)

-        logger.info("Task completed successfully", task_id=task_attempt.id)
+        # Record success metrics
+        record_task_execution(task_name, "success")
+        observe_task_execution_time(task_name, execution_duration)
+
+        logger.info("Task completed successfully", task_id=task_attempt.id, duration=execution_duration)
         return {"result": result}
     except Exception as e:
+        execution_duration = time.perf_counter() - start_time
         logger.exception(
             "Task execution failed",
             task_id=task_attempt.id,
             error_type=type(e).__name__,
             error_message=str(e),
+            duration=execution_duration,
         )
         abd_dal.task_mark_as_failed(task_attempt)

+        # Record failure metrics
+        record_task_execution(task_name, "failed")
+        observe_task_execution_time(task_name, execution_duration)
+
         # Schedule retry after transaction commits:
         if abd_dal.task_can_retry(task_attempt):
             try:
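
From a task author's point of view, the contract these hunks implement is: declare **kwargs to receive the _use_archive_network hint, or omit it and the wrapper strips the hint before calling you. A hypothetical task body illustrating the opt-in (the registration decorator itself is not shown in this diff):

def dump_stake(block_number: int, **kwargs: object) -> dict:
    # Present only because this function declares **kwargs.
    use_archive = bool(kwargs.get("_use_archive_network", False))
    # ...choose an archive node vs. a regular node based on the hint...
    return {"block_number": block_number, "archive": use_archive}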
--- a/abstract_block_dumper-0.0.8.dist-info/METADATA
+++ b/abstract_block_dumper-0.1.0.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: abstract-block-dumper
-Version: 0.0.8
+Version: 0.1.0
 Project-URL: Source, https://github.com/bactensor/abstract-block-dumper
 Project-URL: Issue Tracker, https://github.com/bactensor/abstract-block-dumper/issues
 Author-email: Reef Technologies <opensource@reef.pl>
--- a/abstract_block_dumper-0.0.8.dist-info/RECORD
+++ b/abstract_block_dumper-0.1.0.dist-info/RECORD
@@ -1,5 +1,5 @@
 abstract_block_dumper/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-abstract_block_dumper/_version.py,sha256=j-ar4gJGiWIawqKXhvv9hGJWLfwu0tISl-0GV97B7a0,704
+abstract_block_dumper/_version.py,sha256=5jwwVncvCiTnhOedfkzzxmxsggwmTBORdFL_4wq0ZeY,704
 abstract_block_dumper/admin.py,sha256=3J3I_QOKFgfMNpTXW-rTQGO_q5Ls6uNuL0FkPVdIsYg,1654
 abstract_block_dumper/apps.py,sha256=DXATdrjsL3T2IletTbKeD6unr8ScLaxg7wz0nAHTAns,215
 abstract_block_dumper/models.py,sha256=MO9824dmHB6xF3PrFE_RERh7whVjQtS4tt6QA0wSbg0,2022
@@ -11,12 +11,12 @@ abstract_block_dumper/_internal/dal/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQ
 abstract_block_dumper/_internal/dal/django_dal.py,sha256=i9jocanfptjXw5lfE2xBYvx5mo1g98IoMjlS-WjGP88,5623
 abstract_block_dumper/_internal/dal/memory_registry.py,sha256=m9Yms-cuemi9_5q_Kn_zsJnxDPEiuAUkESIAltD60QY,2943
 abstract_block_dumper/_internal/services/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-abstract_block_dumper/_internal/services/backfill_scheduler.py,sha256=PTBs3tN4LLvFYuXKVsHVpme66RAsnPt166RWlRNA4xQ,15955
+abstract_block_dumper/_internal/services/backfill_scheduler.py,sha256=XgsVYXaz6pR4PBA9giesjhR74x1qLX2281-eQgM5qhs,16311
 abstract_block_dumper/_internal/services/block_processor.py,sha256=NC7p1oD38FpaZb6EbykBolP32uY069abumOvXrjOBV0,6644
-abstract_block_dumper/_internal/services/executor.py,sha256=vuAALKuP7KTQuQyG-P8JfrU22Sr-90HzXsIdfYbXZy4,2080
-abstract_block_dumper/_internal/services/metrics.py,sha256=FLhpq63WVccc0N6K1rN2VnV90jywFfAiG2ZxoDPALv0,3929
-abstract_block_dumper/_internal/services/scheduler.py,sha256=1Ls9dn_siEKl6yXJTS_B4NKhA3ZpZxrMJM_whRtEdxk,7453
-abstract_block_dumper/_internal/services/utils.py,sha256=QSs2hBHWOPgNgKPf_ZmADXuqEiqK5mWZp7JblvQgxZQ,1140
+abstract_block_dumper/_internal/services/executor.py,sha256=WhpHhOAi4cI-qdEE8-DSt9xZwooOpSc9_uDMQBBoHUM,2317
+abstract_block_dumper/_internal/services/metrics.py,sha256=Gg-PQYZ98caaS52wm1EqhtPURXlfrVjk2t3-8nccqfo,7821
+abstract_block_dumper/_internal/services/scheduler.py,sha256=pFVNV1YBHujNIRW9kq_1bxaa_W1Bn_pr7DrgwKaUItw,6779
+abstract_block_dumper/_internal/services/utils.py,sha256=QZxdQyWIcUnezyVmegS4g3x3BoB3-oijYJ_i9nLQWHo,1140
 abstract_block_dumper/management/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 abstract_block_dumper/management/commands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 abstract_block_dumper/management/commands/backfill_blocks_v1.py,sha256=EmNUozAZn8uThjCvusZe7poNrw9RYy-MafMg2wu3XeQ,6392
@@ -25,8 +25,8 @@ abstract_block_dumper/migrations/0001_initial.py,sha256=ImPHC3G6kPkq4Xn_4YVAm4La
 abstract_block_dumper/migrations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 abstract_block_dumper/v1/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 abstract_block_dumper/v1/celery.py,sha256=X4IqVs5i6ZpyY7fy1SqMZgsZy4SXP-jK2qG-FYnjU38,1722
-abstract_block_dumper/v1/decorators.py,sha256=Lua91tR-d0juif9VGVxnC8bzhcgyn8_1bSR33hw7IK0,8920
+abstract_block_dumper/v1/decorators.py,sha256=yQglsy1dU1u7ShwaTqZLahDcybHmetibTIOi53o_ZOM,9829
 abstract_block_dumper/v1/tasks.py,sha256=u9iMYdDUqzYT3yPrNwZecHnlweZ3yFipV9BcIWHCbus,2647
-abstract_block_dumper-0.0.8.dist-info/METADATA,sha256=O5E4ChDm8gX0XSs_zvImjBgl9Q_DJkNu_rW5KSx-fMk,12993
-abstract_block_dumper-0.0.8.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-abstract_block_dumper-0.0.8.dist-info/RECORD,,
+abstract_block_dumper-0.1.0.dist-info/METADATA,sha256=UypVClIjNRQlbzkGmmR2Indrw3c-2gbrlbjHaXin_ys,12993
+abstract_block_dumper-0.1.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+abstract_block_dumper-0.1.0.dist-info/RECORD,,
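
For reference, the digests in RECORD follow the wheel format: urlsafe base64-encoded SHA-256 with the trailing '=' padding stripped. A small sketch to recompute an entry from an unpacked wheel:

import base64
import hashlib

def record_digest(path: str) -> str:
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# For the 0.1.0 wheel, record_digest("abstract_block_dumper/_version.py")
# should return "5jwwVncvCiTnhOedfkzzxmxsggwmTBORdFL_4wq0ZeY".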