abstract-block-dumper 0.0.8__py3-none-any.whl → 0.0.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- abstract_block_dumper/_internal/services/backfill_scheduler.py +8 -0
- abstract_block_dumper/_internal/services/executor.py +5 -0
- abstract_block_dumper/_internal/services/metrics.py +109 -0
- abstract_block_dumper/_internal/services/scheduler.py +8 -1
- abstract_block_dumper/_version.py +2 -2
- abstract_block_dumper/v1/decorators.py +35 -10
- {abstract_block_dumper-0.0.8.dist-info → abstract_block_dumper-0.0.9.dist-info}/METADATA +1 -1
- {abstract_block_dumper-0.0.8.dist-info → abstract_block_dumper-0.0.9.dist-info}/RECORD +9 -9
- {abstract_block_dumper-0.0.8.dist-info → abstract_block_dumper-0.0.9.dist-info}/WHEEL +0 -0
abstract_block_dumper/_internal/services/backfill_scheduler.py

@@ -18,8 +18,10 @@ import abstract_block_dumper._internal.services.utils as abd_utils
 from abstract_block_dumper._internal.services.block_processor import BlockProcessor, block_processor_factory
 from abstract_block_dumper._internal.services.metrics import (
     BlockProcessingTimer,
+    increment_archive_network_usage,
     increment_blocks_processed,
     set_backfill_progress,
+    set_block_lag,
     set_current_block,
 )
 from abstract_block_dumper._internal.services.utils import serialize_args
@@ -275,6 +277,10 @@ class BackfillScheduler:
             set_backfill_progress(self.from_block, self.to_block, block_number)
             increment_blocks_processed("backfill")
 
+            # Track block lag (distance from chain head)
+            if self._current_head_cache:
+                set_block_lag("backfill", self._current_head_cache - block_number)
+
             # Log progress periodically
             if processed_count % PROGRESS_LOG_INTERVAL == 0:
                 progress_pct = (processed_count / total_blocks) * 100
@@ -380,6 +386,8 @@ class BackfillScheduler:
         try:
            if registry_item.match_condition(block_number, **args):
                use_archive = self._requires_archive_network(block_number)
+               if use_archive:
+                   increment_archive_network_usage()
                logger.debug(
                    "Backfilling block",
                    function_name=registry_item.function.__name__,
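
The backfill lag update only fires once a chain head has been cached. A minimal, self-contained sketch of that arithmetic (the helper name and sample numbers below are illustrative, not part of the package):

```python
# Illustrative sketch: how backfill lag is derived from a cached chain head.
# The truthiness check mirrors `if self._current_head_cache:` above, so the
# metric is skipped while the head is still unknown (None) or zero.

def backfill_lag(current_head_cache: int | None, block_number: int) -> int | None:
    """Blocks between the cached chain head and the block being backfilled."""
    if not current_head_cache:
        return None  # no head cached yet -> don't report a lag
    return current_head_cache - block_number

assert backfill_lag(None, 950) is None   # head not fetched yet
assert backfill_lag(1_000, 950) == 50    # backfill runs 50 blocks behind the head
```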
abstract_block_dumper/_internal/services/executor.py

@@ -4,6 +4,7 @@ import structlog
 
 import abstract_block_dumper._internal.dal.django_dal as abd_dal
 from abstract_block_dumper._internal.dal.memory_registry import RegistryItem
+from abstract_block_dumper._internal.services.metrics import increment_tasks_submitted
 from abstract_block_dumper.models import TaskAttempt
 
 logger = structlog.get_logger(__name__)
@@ -61,4 +62,8 @@ class CeleryExecutor:
         # The task ID can be retrieved from the task_attempt if needed
         registry_item.function.apply_async(**apply_async_kwargs)
 
+        # Track task submission metric
+        task_name = registry_item.executable_path.split(".")[-1]
+        increment_tasks_submitted(task_name)
+
         logger.debug("Celery task scheduled", task_id=task_attempt.id)
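
The submission counter is labeled with a short task name taken from the registry item's dotted path, which keeps Prometheus label cardinality to one value per task function rather than one per full module path. A tiny sketch of the derivation (the example path is made up):

```python
# Illustrative: derive the metric label from a dotted executable path.
executable_path = "myproject.tasks.dump_transfers"  # hypothetical path
task_name = executable_path.split(".")[-1]          # -> "dump_transfers"
assert task_name == "dump_transfers"
```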
abstract_block_dumper/_internal/services/metrics.py

@@ -33,6 +33,15 @@ BACKFILL_PROGRESS = None
 BACKFILL_FROM_BLOCK = None
 BACKFILL_TO_BLOCK = None
 BLOCK_PROCESSING_TIME = None
+# Task-level metrics
+TASK_EXECUTIONS = None
+TASK_EXECUTION_TIME = None
+TASK_RETRIES = None
+# Business/observability metrics
+BLOCK_LAG = None  # How far behind the chain head
+PENDING_TASKS = None  # Current pending tasks count
+REGISTERED_TASKS = None  # Number of registered task types
+ARCHIVE_NETWORK_USAGE = None  # Counter for archive network fallback
 
 if PROMETHEUS_AVAILABLE:
     BLOCKS_PROCESSED = Counter(  # type: ignore
@@ -67,6 +76,41 @@ if PROMETHEUS_AVAILABLE:
         "Time spent processing each block",
         ["mode"],
     )
+    # Task-level metrics
+    TASK_EXECUTIONS = Counter(  # type: ignore
+        "block_dumper_task_executions_total",
+        "Total task executions by status",
+        ["task_name", "status"],  # status: 'success', 'failed'
+    )
+    TASK_EXECUTION_TIME = Histogram(  # type: ignore
+        "block_dumper_task_execution_seconds",
+        "Time spent executing each task",
+        ["task_name"],
+        buckets=(0.01, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0, 30.0, 60.0, 120.0),
+    )
+    TASK_RETRIES = Counter(  # type: ignore
+        "block_dumper_task_retries_total",
+        "Total task retry attempts",
+        ["task_name"],
+    )
+    # Business/observability metrics
+    BLOCK_LAG = Gauge(  # type: ignore
+        "block_dumper_block_lag",
+        "Number of blocks behind the chain head",
+        ["mode"],  # 'realtime' or 'backfill'
+    )
+    PENDING_TASKS = Gauge(  # type: ignore
+        "block_dumper_pending_tasks",
+        "Current number of pending tasks in queue",
+    )
+    REGISTERED_TASKS = Gauge(  # type: ignore
+        "block_dumper_registered_tasks",
+        "Number of registered task types",
+    )
+    ARCHIVE_NETWORK_USAGE = Counter(  # type: ignore
+        "block_dumper_archive_network_requests_total",
+        "Total requests using archive network",
+    )
 
 
 def increment_blocks_processed(mode: str) -> None:
@@ -105,6 +149,71 @@ def set_backfill_progress(from_block: int, to_block: int, current_block: int) ->
     BACKFILL_PROGRESS.set(progress)
 
 
+def set_block_lag(mode: str, lag: int) -> None:
+    """Set the current block lag (distance from chain head)."""
+    if PROMETHEUS_AVAILABLE and BLOCK_LAG is not None:
+        BLOCK_LAG.labels(mode=mode).set(lag)
+
+
+def set_pending_tasks(count: int) -> None:
+    """Set the current number of pending tasks."""
+    if PROMETHEUS_AVAILABLE and PENDING_TASKS is not None:
+        PENDING_TASKS.set(count)
+
+
+def set_registered_tasks(count: int) -> None:
+    """Set the number of registered task types."""
+    if PROMETHEUS_AVAILABLE and REGISTERED_TASKS is not None:
+        REGISTERED_TASKS.set(count)
+
+
+def increment_archive_network_usage() -> None:
+    """Increment the archive network usage counter."""
+    if PROMETHEUS_AVAILABLE and ARCHIVE_NETWORK_USAGE is not None:
+        ARCHIVE_NETWORK_USAGE.inc()
+
+
+def record_task_execution(task_name: str, status: str) -> None:
+    """Record a task execution with status (success/failed)."""
+    if PROMETHEUS_AVAILABLE and TASK_EXECUTIONS is not None:
+        TASK_EXECUTIONS.labels(task_name=task_name, status=status).inc()
+
+
+def record_task_retry(task_name: str) -> None:
+    """Record a task retry attempt."""
+    if PROMETHEUS_AVAILABLE and TASK_RETRIES is not None:
+        TASK_RETRIES.labels(task_name=task_name).inc()
+
+
+def observe_task_execution_time(task_name: str, duration: float) -> None:
+    """Record task execution duration in seconds."""
+    if PROMETHEUS_AVAILABLE and TASK_EXECUTION_TIME is not None:
+        TASK_EXECUTION_TIME.labels(task_name=task_name).observe(duration)
+
+
+class TaskExecutionTimer:
+    """Context manager for timing task execution."""
+
+    def __init__(self, task_name: str) -> None:
+        self.task_name = task_name
+        self._timer: Any = None
+
+    def __enter__(self) -> Self:
+        if PROMETHEUS_AVAILABLE and TASK_EXECUTION_TIME is not None:
+            self._timer = TASK_EXECUTION_TIME.labels(task_name=self.task_name).time()
+            self._timer.__enter__()
+        return self
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_val: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        if self._timer is not None:
+            self._timer.__exit__(exc_type, exc_val, exc_tb)
+
+
 class BlockProcessingTimer:
     """Context manager for timing block processing."""
 
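
All of the new helpers follow the module's existing optional-dependency convention: metric globals default to None and every setter is a no-op unless prometheus_client imported successfully, so callers never need their own guards. A minimal standalone sketch of the pattern, reduced to one gauge:

```python
# Minimal sketch of the guard pattern used by set_block_lag() and friends.
try:
    from prometheus_client import Gauge
    PROMETHEUS_AVAILABLE = True
except ImportError:
    PROMETHEUS_AVAILABLE = False

BLOCK_LAG = None
if PROMETHEUS_AVAILABLE:
    BLOCK_LAG = Gauge(
        "block_dumper_block_lag",
        "Number of blocks behind the chain head",
        ["mode"],
    )

def set_block_lag(mode: str, lag: int) -> None:
    """Set the gauge if metrics are available; otherwise do nothing."""
    if PROMETHEUS_AVAILABLE and BLOCK_LAG is not None:
        BLOCK_LAG.labels(mode=mode).set(lag)

set_block_lag("backfill", 50)  # safe whether or not prometheus_client is installed
```

TaskExecutionTimer applies the same convention to the histogram: `__enter__` only starts a timer when TASK_EXECUTION_TIME exists, so `with TaskExecutionTimer("my_task"): ...` is safe in both configurations.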
abstract_block_dumper/_internal/services/scheduler.py

@@ -10,7 +10,9 @@ from abstract_block_dumper._internal.services.block_processor import BlockProces
 from abstract_block_dumper._internal.services.metrics import (
     BlockProcessingTimer,
     increment_blocks_processed,
+    set_block_lag,
     set_current_block,
+    set_registered_tasks,
 )
 
 logger = structlog.get_logger(__name__)
@@ -94,10 +96,13 @@ class TaskScheduler:
 
         self.initialize_last_block()
 
+        registered_tasks_count = len(self.block_processor.registry.get_functions())
+        set_registered_tasks(registered_tasks_count)
+
         logger.info(
             "TaskScheduler started",
             last_processed_block=self.last_processed_block,
-            registry_functions=
+            registry_functions=registered_tasks_count,
         )
 
         while self.is_running:
@@ -117,6 +122,7 @@ class TaskScheduler:
 
                 set_current_block("realtime", current_block)
                 increment_blocks_processed("realtime")
+                set_block_lag("realtime", 0)  # Head-only mode has no lag
                 self.last_processed_block = current_block
 
                 time.sleep(self.poll_interval)
@@ -129,6 +135,7 @@ class TaskScheduler:
                     # Update metrics
                     set_current_block("realtime", block_number)
                     increment_blocks_processed("realtime")
+                    set_block_lag("realtime", current_block - block_number)
 
                     time.sleep(self.poll_interval)
                     self.last_processed_block = block_number
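
The two realtime updates encode different situations: when the scheduler is exactly at the head it reports zero lag, and when it is catching up it reports the distance to the head block observed on that poll. A short usage sketch (block numbers are examples; the import path is the internal module from this diff, shown here purely for illustration):

```python
from abstract_block_dumper._internal.services.metrics import set_block_lag

current_block = 1_000  # head observed on this poll (example value)
block_number = 997     # block currently being processed while catching up

set_block_lag("realtime", 0)                             # head-only path: no lag by definition
set_block_lag("realtime", current_block - block_number)  # catch-up path: 3 blocks behind
```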
abstract_block_dumper/_version.py

@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
 commit_id: COMMIT_ID
 __commit_id__: COMMIT_ID
 
-__version__ = version = '0.0.8'
-__version_tuple__ = version_tuple = (0, 0, 8)
+__version__ = version = '0.0.9'
+__version_tuple__ = version_tuple = (0, 0, 9)
 
 __commit_id__ = commit_id = None
abstract_block_dumper/v1/decorators.py

@@ -1,4 +1,5 @@
 import inspect
+import time
 from collections.abc import Callable
 from typing import Any, cast
 
@@ -10,6 +11,11 @@ import abstract_block_dumper._internal.dal.django_dal as abd_dal
 import abstract_block_dumper._internal.services.utils as abd_utils
 from abstract_block_dumper._internal.dal.memory_registry import RegistryItem, task_registry
 from abstract_block_dumper._internal.exceptions import CeleryTaskLockedError
+from abstract_block_dumper._internal.services.metrics import (
+    observe_task_execution_time,
+    record_task_execution,
+    record_task_retry,
+)
 from abstract_block_dumper.models import TaskAttempt
 
 logger = structlog.get_logger(__name__)
@@ -62,6 +68,10 @@ def schedule_retry(task_attempt: TaskAttempt) -> None:
         eta=task_attempt.next_retry_at,
     )
 
+    # Record retry metric
+    task_name = task_attempt.executable_path.split(".")[-1]
+    record_task_retry(task_name)
+
 
 def _celery_task_wrapper(
     func: Callable[..., Any], block_number: int, **kwargs: dict[str, Any]
@@ -102,10 +112,20 @@ def _celery_task_wrapper(
     abd_dal.task_mark_as_started(task_attempt, abd_utils.get_current_celery_task_id())
 
     # Start task execution
+    # Pass _use_archive_network only if the function accepts **kwargs
+    # Otherwise, strip it to avoid TypeError
+    execution_kwargs = {"block_number": block_number, **kwargs}
+
+    # Check if function accepts **kwargs before adding _use_archive_network
+    sig = inspect.signature(func)
+    has_var_keyword = any(p.kind == inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values())
+    if has_var_keyword:
+        execution_kwargs["_use_archive_network"] = use_archive_network
+
+    task_name = executable_path.split(".")[-1]  # Get short task name
+    start_time = time.perf_counter()
+
     try:
-        # Pass _use_archive_network only if the function accepts **kwargs
-        # Otherwise, strip it to avoid TypeError
-        execution_kwargs = {"block_number": block_number, **kwargs}
         logger.info(
             "Starting task execution",
             task_id=task_attempt.id,
@@ -115,27 +135,32 @@ def _celery_task_wrapper(
             use_archive_network=use_archive_network,
         )
 
-        # Check if function accepts **kwargs before adding _use_archive_network
-        sig = inspect.signature(func)
-        has_var_keyword = any(p.kind == inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values())
-        if has_var_keyword:
-            execution_kwargs["_use_archive_network"] = use_archive_network
-
         result = func(**execution_kwargs)
+        execution_duration = time.perf_counter() - start_time
 
         abd_dal.task_mark_as_success(task_attempt, result)
 
-
+        # Record success metrics
+        record_task_execution(task_name, "success")
+        observe_task_execution_time(task_name, execution_duration)
+
+        logger.info("Task completed successfully", task_id=task_attempt.id, duration=execution_duration)
         return {"result": result}
     except Exception as e:
+        execution_duration = time.perf_counter() - start_time
        logger.exception(
            "Task execution failed",
            task_id=task_attempt.id,
            error_type=type(e).__name__,
            error_message=str(e),
+           duration=execution_duration,
        )
        abd_dal.task_mark_as_failed(task_attempt)
 
+       # Record failure metrics
+       record_task_execution(task_name, "failed")
+       observe_task_execution_time(task_name, execution_duration)
+
        # Schedule retry after transaction commits:
        if abd_dal.task_can_retry(task_attempt):
            try:
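
Hoisting the signature inspection out of the try block means execution_kwargs, task_name, and start_time all exist before timing begins; the **kwargs check itself is unchanged. A self-contained sketch of that check (the two sample task functions are hypothetical):

```python
import inspect

def accepts_var_keyword(func) -> bool:
    """True if `func` declares **kwargs and can safely receive extra keys."""
    return any(
        p.kind == inspect.Parameter.VAR_KEYWORD
        for p in inspect.signature(func).parameters.values()
    )

def strict_task(block_number: int): ...               # hypothetical task
def flexible_task(block_number: int, **kwargs): ...   # hypothetical task

assert not accepts_var_keyword(strict_task)   # extra kwargs would raise TypeError
assert accepts_var_keyword(flexible_task)     # safe to pass _use_archive_network
```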
{abstract_block_dumper-0.0.8.dist-info → abstract_block_dumper-0.0.9.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: abstract-block-dumper
-Version: 0.0.8
+Version: 0.0.9
 Project-URL: Source, https://github.com/bactensor/abstract-block-dumper
 Project-URL: Issue Tracker, https://github.com/bactensor/abstract-block-dumper/issues
 Author-email: Reef Technologies <opensource@reef.pl>
{abstract_block_dumper-0.0.8.dist-info → abstract_block_dumper-0.0.9.dist-info}/RECORD

@@ -1,5 +1,5 @@
 abstract_block_dumper/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-abstract_block_dumper/_version.py,sha256=
+abstract_block_dumper/_version.py,sha256=X2FLFwBoUmgJPsOV-1l-SxIXNSTX1TQ036Kf2j9Mc68,704
 abstract_block_dumper/admin.py,sha256=3J3I_QOKFgfMNpTXW-rTQGO_q5Ls6uNuL0FkPVdIsYg,1654
 abstract_block_dumper/apps.py,sha256=DXATdrjsL3T2IletTbKeD6unr8ScLaxg7wz0nAHTAns,215
 abstract_block_dumper/models.py,sha256=MO9824dmHB6xF3PrFE_RERh7whVjQtS4tt6QA0wSbg0,2022
@@ -11,11 +11,11 @@ abstract_block_dumper/_internal/dal/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQ
 abstract_block_dumper/_internal/dal/django_dal.py,sha256=i9jocanfptjXw5lfE2xBYvx5mo1g98IoMjlS-WjGP88,5623
 abstract_block_dumper/_internal/dal/memory_registry.py,sha256=m9Yms-cuemi9_5q_Kn_zsJnxDPEiuAUkESIAltD60QY,2943
 abstract_block_dumper/_internal/services/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-abstract_block_dumper/_internal/services/backfill_scheduler.py,sha256=
+abstract_block_dumper/_internal/services/backfill_scheduler.py,sha256=XgsVYXaz6pR4PBA9giesjhR74x1qLX2281-eQgM5qhs,16311
 abstract_block_dumper/_internal/services/block_processor.py,sha256=NC7p1oD38FpaZb6EbykBolP32uY069abumOvXrjOBV0,6644
-abstract_block_dumper/_internal/services/executor.py,sha256=
-abstract_block_dumper/_internal/services/metrics.py,sha256=
-abstract_block_dumper/_internal/services/scheduler.py,sha256=
+abstract_block_dumper/_internal/services/executor.py,sha256=WhpHhOAi4cI-qdEE8-DSt9xZwooOpSc9_uDMQBBoHUM,2317
+abstract_block_dumper/_internal/services/metrics.py,sha256=Gg-PQYZ98caaS52wm1EqhtPURXlfrVjk2t3-8nccqfo,7821
+abstract_block_dumper/_internal/services/scheduler.py,sha256=B62ZoOxSv8i5mverBWv67BzoyBd14f0wWnNQ329pJds,7770
 abstract_block_dumper/_internal/services/utils.py,sha256=QSs2hBHWOPgNgKPf_ZmADXuqEiqK5mWZp7JblvQgxZQ,1140
 abstract_block_dumper/management/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 abstract_block_dumper/management/commands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -25,8 +25,8 @@ abstract_block_dumper/migrations/0001_initial.py,sha256=ImPHC3G6kPkq4Xn_4YVAm4La
 abstract_block_dumper/migrations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 abstract_block_dumper/v1/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 abstract_block_dumper/v1/celery.py,sha256=X4IqVs5i6ZpyY7fy1SqMZgsZy4SXP-jK2qG-FYnjU38,1722
-abstract_block_dumper/v1/decorators.py,sha256=
+abstract_block_dumper/v1/decorators.py,sha256=4FXsBScT_5Wadl8FadRcZZtyLytZUzSTX4V5DI0IuRs,9820
 abstract_block_dumper/v1/tasks.py,sha256=u9iMYdDUqzYT3yPrNwZecHnlweZ3yFipV9BcIWHCbus,2647
-abstract_block_dumper-0.0.
-abstract_block_dumper-0.0.
-abstract_block_dumper-0.0.
+abstract_block_dumper-0.0.9.dist-info/METADATA,sha256=xWskTf2HCMNuFGA5M2KSyyejjBLKgQ6OBv2Q5VuL5_I,12993
+abstract_block_dumper-0.0.9.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+abstract_block_dumper-0.0.9.dist-info/RECORD,,

{abstract_block_dumper-0.0.8.dist-info → abstract_block_dumper-0.0.9.dist-info}/WHEEL

File without changes.