brawny-0.1.13-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (141)
  1. brawny/__init__.py +106 -0
  2. brawny/_context.py +232 -0
  3. brawny/_rpc/__init__.py +38 -0
  4. brawny/_rpc/broadcast.py +172 -0
  5. brawny/_rpc/clients.py +98 -0
  6. brawny/_rpc/context.py +49 -0
  7. brawny/_rpc/errors.py +252 -0
  8. brawny/_rpc/gas.py +158 -0
  9. brawny/_rpc/manager.py +982 -0
  10. brawny/_rpc/selector.py +156 -0
  11. brawny/accounts.py +534 -0
  12. brawny/alerts/__init__.py +132 -0
  13. brawny/alerts/abi_resolver.py +530 -0
  14. brawny/alerts/base.py +152 -0
  15. brawny/alerts/context.py +271 -0
  16. brawny/alerts/contracts.py +635 -0
  17. brawny/alerts/encoded_call.py +201 -0
  18. brawny/alerts/errors.py +267 -0
  19. brawny/alerts/events.py +680 -0
  20. brawny/alerts/function_caller.py +364 -0
  21. brawny/alerts/health.py +185 -0
  22. brawny/alerts/routing.py +118 -0
  23. brawny/alerts/send.py +364 -0
  24. brawny/api.py +660 -0
  25. brawny/chain.py +93 -0
  26. brawny/cli/__init__.py +16 -0
  27. brawny/cli/app.py +17 -0
  28. brawny/cli/bootstrap.py +37 -0
  29. brawny/cli/commands/__init__.py +41 -0
  30. brawny/cli/commands/abi.py +93 -0
  31. brawny/cli/commands/accounts.py +632 -0
  32. brawny/cli/commands/console.py +495 -0
  33. brawny/cli/commands/contract.py +139 -0
  34. brawny/cli/commands/health.py +112 -0
  35. brawny/cli/commands/init_project.py +86 -0
  36. brawny/cli/commands/intents.py +130 -0
  37. brawny/cli/commands/job_dev.py +254 -0
  38. brawny/cli/commands/jobs.py +308 -0
  39. brawny/cli/commands/logs.py +87 -0
  40. brawny/cli/commands/maintenance.py +182 -0
  41. brawny/cli/commands/migrate.py +51 -0
  42. brawny/cli/commands/networks.py +253 -0
  43. brawny/cli/commands/run.py +249 -0
  44. brawny/cli/commands/script.py +209 -0
  45. brawny/cli/commands/signer.py +248 -0
  46. brawny/cli/helpers.py +265 -0
  47. brawny/cli_templates.py +1445 -0
  48. brawny/config/__init__.py +74 -0
  49. brawny/config/models.py +404 -0
  50. brawny/config/parser.py +633 -0
  51. brawny/config/routing.py +55 -0
  52. brawny/config/validation.py +246 -0
  53. brawny/daemon/__init__.py +14 -0
  54. brawny/daemon/context.py +69 -0
  55. brawny/daemon/core.py +702 -0
  56. brawny/daemon/loops.py +327 -0
  57. brawny/db/__init__.py +78 -0
  58. brawny/db/base.py +986 -0
  59. brawny/db/base_new.py +165 -0
  60. brawny/db/circuit_breaker.py +97 -0
  61. brawny/db/global_cache.py +298 -0
  62. brawny/db/mappers.py +182 -0
  63. brawny/db/migrate.py +349 -0
  64. brawny/db/migrations/001_init.sql +186 -0
  65. brawny/db/migrations/002_add_included_block.sql +7 -0
  66. brawny/db/migrations/003_add_broadcast_at.sql +10 -0
  67. brawny/db/migrations/004_broadcast_binding.sql +20 -0
  68. brawny/db/migrations/005_add_retry_after.sql +9 -0
  69. brawny/db/migrations/006_add_retry_count_column.sql +11 -0
  70. brawny/db/migrations/007_add_gap_tracking.sql +18 -0
  71. brawny/db/migrations/008_add_transactions.sql +72 -0
  72. brawny/db/migrations/009_add_intent_metadata.sql +5 -0
  73. brawny/db/migrations/010_add_nonce_gap_index.sql +9 -0
  74. brawny/db/migrations/011_add_job_logs.sql +24 -0
  75. brawny/db/migrations/012_add_claimed_by.sql +5 -0
  76. brawny/db/ops/__init__.py +29 -0
  77. brawny/db/ops/attempts.py +108 -0
  78. brawny/db/ops/blocks.py +83 -0
  79. brawny/db/ops/cache.py +93 -0
  80. brawny/db/ops/intents.py +296 -0
  81. brawny/db/ops/jobs.py +110 -0
  82. brawny/db/ops/logs.py +97 -0
  83. brawny/db/ops/nonces.py +322 -0
  84. brawny/db/postgres.py +2535 -0
  85. brawny/db/postgres_new.py +196 -0
  86. brawny/db/queries.py +584 -0
  87. brawny/db/sqlite.py +2733 -0
  88. brawny/db/sqlite_new.py +191 -0
  89. brawny/history.py +126 -0
  90. brawny/interfaces.py +136 -0
  91. brawny/invariants.py +155 -0
  92. brawny/jobs/__init__.py +26 -0
  93. brawny/jobs/base.py +287 -0
  94. brawny/jobs/discovery.py +233 -0
  95. brawny/jobs/job_validation.py +111 -0
  96. brawny/jobs/kv.py +125 -0
  97. brawny/jobs/registry.py +283 -0
  98. brawny/keystore.py +484 -0
  99. brawny/lifecycle.py +551 -0
  100. brawny/logging.py +290 -0
  101. brawny/metrics.py +594 -0
  102. brawny/model/__init__.py +53 -0
  103. brawny/model/contexts.py +319 -0
  104. brawny/model/enums.py +70 -0
  105. brawny/model/errors.py +194 -0
  106. brawny/model/events.py +93 -0
  107. brawny/model/startup.py +20 -0
  108. brawny/model/types.py +483 -0
  109. brawny/networks/__init__.py +96 -0
  110. brawny/networks/config.py +269 -0
  111. brawny/networks/manager.py +423 -0
  112. brawny/obs/__init__.py +67 -0
  113. brawny/obs/emit.py +158 -0
  114. brawny/obs/health.py +175 -0
  115. brawny/obs/heartbeat.py +133 -0
  116. brawny/reconciliation.py +108 -0
  117. brawny/scheduler/__init__.py +19 -0
  118. brawny/scheduler/poller.py +472 -0
  119. brawny/scheduler/reorg.py +632 -0
  120. brawny/scheduler/runner.py +708 -0
  121. brawny/scheduler/shutdown.py +371 -0
  122. brawny/script_tx.py +297 -0
  123. brawny/scripting.py +251 -0
  124. brawny/startup.py +76 -0
  125. brawny/telegram.py +393 -0
  126. brawny/testing.py +108 -0
  127. brawny/tx/__init__.py +41 -0
  128. brawny/tx/executor.py +1071 -0
  129. brawny/tx/fees.py +50 -0
  130. brawny/tx/intent.py +423 -0
  131. brawny/tx/monitor.py +628 -0
  132. brawny/tx/nonce.py +498 -0
  133. brawny/tx/replacement.py +456 -0
  134. brawny/tx/utils.py +26 -0
  135. brawny/utils.py +205 -0
  136. brawny/validation.py +69 -0
  137. brawny-0.1.13.dist-info/METADATA +156 -0
  138. brawny-0.1.13.dist-info/RECORD +141 -0
  139. brawny-0.1.13.dist-info/WHEEL +5 -0
  140. brawny-0.1.13.dist-info/entry_points.txt +2 -0
  141. brawny-0.1.13.dist-info/top_level.txt +1 -0
brawny/metrics.py ADDED
@@ -0,0 +1,594 @@
"""Metrics abstractions for brawny.

Provides a pluggable metrics interface that can be backed by
Prometheus, StatsD, DataDog, or any other metrics system.

Usage:
    from brawny.metrics import get_metrics

    metrics = get_metrics()
    metrics.counter("brawny_tx_confirmed_total").inc(job_id="my_job")
    metrics.gauge("brawny_pending_intents").set(5, chain_id=1)
    with metrics.histogram("brawny_tx_confirmation_seconds").time():
        await wait_for_confirmation()
"""

from __future__ import annotations

import time
from abc import ABC, abstractmethod
from contextlib import contextmanager
from dataclasses import dataclass, field
from typing import Any, Iterator, Protocol

from prometheus_client import (
    CollectorRegistry,
    Counter as PromCounter,
    Gauge as PromGauge,
    Histogram as PromHistogram,
    start_http_server,
)

class Counter(Protocol):
    """Counter metric that only goes up."""

    def inc(self, value: int = 1, **labels: Any) -> None:
        """Increment counter by value."""
        ...


class Gauge(Protocol):
    """Gauge metric that can go up or down."""

    def set(self, value: float, **labels: Any) -> None:
        """Set gauge to value."""
        ...

    def inc(self, value: float = 1.0, **labels: Any) -> None:
        """Increment gauge by value."""
        ...

    def dec(self, value: float = 1.0, **labels: Any) -> None:
        """Decrement gauge by value."""
        ...


class Histogram(Protocol):
    """Histogram metric for distributions."""

    def observe(self, value: float, **labels: Any) -> None:
        """Observe a value."""
        ...

    @contextmanager
    def time(self, **labels: Any) -> Iterator[None]:
        """Time a block of code."""
        ...


class MetricsProvider(ABC):
    """Abstract metrics provider interface."""

    @abstractmethod
    def counter(self, name: str) -> Counter:
        """Get or create a counter."""
        ...

    @abstractmethod
    def gauge(self, name: str) -> Gauge:
        """Get or create a gauge."""
        ...

    @abstractmethod
    def histogram(self, name: str, buckets: list[float] | None = None) -> Histogram:
        """Get or create a histogram."""
        ...


@dataclass
class NoOpCounter:
    """Counter that does nothing (for when metrics are disabled)."""

    name: str

    def inc(self, value: int = 1, **labels: Any) -> None:
        """No-op increment."""
        pass


@dataclass
class NoOpGauge:
    """Gauge that does nothing (for when metrics are disabled)."""

    name: str

    def set(self, value: float, **labels: Any) -> None:
        """No-op set."""
        pass

    def inc(self, value: float = 1.0, **labels: Any) -> None:
        """No-op increment."""
        pass

    def dec(self, value: float = 1.0, **labels: Any) -> None:
        """No-op decrement."""
        pass


@dataclass
class NoOpHistogram:
    """Histogram that does nothing (for when metrics are disabled)."""

    name: str

    def observe(self, value: float, **labels: Any) -> None:
        """No-op observe."""
        pass

    @contextmanager
    def time(self, **labels: Any) -> Iterator[None]:
        """No-op timer."""
        yield


class NoOpMetricsProvider(MetricsProvider):
    """Metrics provider that does nothing."""

    def __init__(self) -> None:
        self._counters: dict[str, NoOpCounter] = {}
        self._gauges: dict[str, NoOpGauge] = {}
        self._histograms: dict[str, NoOpHistogram] = {}

    def counter(self, name: str) -> Counter:
        if name not in self._counters:
            self._counters[name] = NoOpCounter(name)
        return self._counters[name]

    def gauge(self, name: str) -> Gauge:
        if name not in self._gauges:
            self._gauges[name] = NoOpGauge(name)
        return self._gauges[name]

    def histogram(self, name: str, buckets: list[float] | None = None) -> Histogram:
        if name not in self._histograms:
            self._histograms[name] = NoOpHistogram(name)
        return self._histograms[name]


@dataclass
class InMemoryCounter:
    """In-memory counter for testing and development."""

    name: str
    values: dict[tuple[tuple[str, Any], ...], float] = field(default_factory=dict)

    def inc(self, value: int = 1, **labels: Any) -> None:
        """Increment counter by value."""
        key = tuple(sorted(labels.items()))
        self.values[key] = self.values.get(key, 0) + value

    def get(self, **labels: Any) -> float:
        """Get current value for labels."""
        key = tuple(sorted(labels.items()))
        return self.values.get(key, 0)


@dataclass
class InMemoryGauge:
    """In-memory gauge for testing and development."""

    name: str
    values: dict[tuple[tuple[str, Any], ...], float] = field(default_factory=dict)

    def set(self, value: float, **labels: Any) -> None:
        """Set gauge to value."""
        key = tuple(sorted(labels.items()))
        self.values[key] = value

    def inc(self, value: float = 1.0, **labels: Any) -> None:
        """Increment gauge by value."""
        key = tuple(sorted(labels.items()))
        self.values[key] = self.values.get(key, 0) + value

    def dec(self, value: float = 1.0, **labels: Any) -> None:
        """Decrement gauge by value."""
        key = tuple(sorted(labels.items()))
        self.values[key] = self.values.get(key, 0) - value

    def get(self, **labels: Any) -> float:
        """Get current value for labels."""
        key = tuple(sorted(labels.items()))
        return self.values.get(key, 0)


@dataclass
class InMemoryHistogram:
    """In-memory histogram for testing and development."""

    name: str
    buckets: list[float] = field(default_factory=lambda: [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10])
    observations: dict[tuple[tuple[str, Any], ...], list[float]] = field(default_factory=dict)

    def observe(self, value: float, **labels: Any) -> None:
        """Observe a value."""
        key = tuple(sorted(labels.items()))
        if key not in self.observations:
            self.observations[key] = []
        self.observations[key].append(value)

    @contextmanager
    def time(self, **labels: Any) -> Iterator[None]:
        """Time a block of code."""
        start = time.perf_counter()
        try:
            yield
        finally:
            duration = time.perf_counter() - start
            self.observe(duration, **labels)

    def get_observations(self, **labels: Any) -> list[float]:
        """Get all observations for labels."""
        key = tuple(sorted(labels.items()))
        return self.observations.get(key, [])


class InMemoryMetricsProvider(MetricsProvider):
    """In-memory metrics provider for testing and development."""

    def __init__(self) -> None:
        self._counters: dict[str, InMemoryCounter] = {}
        self._gauges: dict[str, InMemoryGauge] = {}
        self._histograms: dict[str, InMemoryHistogram] = {}

    def counter(self, name: str) -> InMemoryCounter:
        if name not in self._counters:
            self._counters[name] = InMemoryCounter(name)
        return self._counters[name]

    def gauge(self, name: str) -> InMemoryGauge:
        if name not in self._gauges:
            self._gauges[name] = InMemoryGauge(name)
        return self._gauges[name]

    def histogram(self, name: str, buckets: list[float] | None = None) -> InMemoryHistogram:
        if name not in self._histograms:
            resolved_buckets = _resolve_histogram_buckets(name, buckets)
            self._histograms[name] = InMemoryHistogram(
                name,
                resolved_buckets or DEFAULT_LATENCY_BUCKETS,
            )
        return self._histograms[name]

    def reset(self) -> None:
        """Reset all metrics."""
        self._counters.clear()
        self._gauges.clear()
        self._histograms.clear()


# =========================================================================
# Prometheus provider
# =========================================================================


class _PrometheusMetric:
    def __init__(self, metric: Any, labelnames: list[str]) -> None:
        self._metric = metric
        self._labelnames = labelnames

    def _labels(self, labels: dict[str, Any]) -> Any:
        if not self._labelnames:
            return self._metric
        normalized: dict[str, Any] = {}
        for name in self._labelnames:
            normalized[name] = labels.get(name, "unknown")
        return self._metric.labels(**normalized)


class PrometheusCounter(_PrometheusMetric):
    def inc(self, value: int = 1, **labels: Any) -> None:
        self._labels(labels).inc(value)


class PrometheusGauge(_PrometheusMetric):
    def set(self, value: float, **labels: Any) -> None:
        self._labels(labels).set(value)

    def inc(self, value: float = 1.0, **labels: Any) -> None:
        self._labels(labels).inc(value)

    def dec(self, value: float = 1.0, **labels: Any) -> None:
        self._labels(labels).dec(value)


class PrometheusHistogram(_PrometheusMetric):
    def observe(self, value: float, **labels: Any) -> None:
        self._labels(labels).observe(value)

    @contextmanager
    def time(self, **labels: Any) -> Iterator[None]:
        start = time.perf_counter()
        try:
            yield
        finally:
            duration = time.perf_counter() - start
            self.observe(duration, **labels)


class PrometheusMetricsProvider(MetricsProvider):
    def __init__(self, registry: CollectorRegistry | None = None) -> None:
        self._registry = registry or CollectorRegistry()
        self._counters: dict[str, PrometheusCounter] = {}
        self._gauges: dict[str, PrometheusGauge] = {}
        self._histograms: dict[str, PrometheusHistogram] = {}

    @property
    def registry(self) -> CollectorRegistry:
        return self._registry

    def counter(self, name: str) -> PrometheusCounter:
        if name not in self._counters:
            labelnames = METRIC_LABELS.get(name, [])
            metric = PromCounter(name, METRIC_DESCRIPTIONS.get(name, name), labelnames, registry=self._registry)
            self._counters[name] = PrometheusCounter(metric, labelnames)
        return self._counters[name]

    def gauge(self, name: str) -> PrometheusGauge:
        if name not in self._gauges:
            labelnames = METRIC_LABELS.get(name, [])
            metric = PromGauge(name, METRIC_DESCRIPTIONS.get(name, name), labelnames, registry=self._registry)
            self._gauges[name] = PrometheusGauge(metric, labelnames)
        return self._gauges[name]

    def histogram(self, name: str, buckets: list[float] | None = None) -> PrometheusHistogram:
        if name not in self._histograms:
            labelnames = METRIC_LABELS.get(name, [])
            resolved_buckets = _resolve_histogram_buckets(name, buckets)
            metric_kwargs = {
                "labelnames": labelnames,
                "registry": self._registry,
            }
            if resolved_buckets is not None:
                metric_kwargs["buckets"] = resolved_buckets
            metric = PromHistogram(
                name,
                METRIC_DESCRIPTIONS.get(name, name),
                **metric_kwargs,
            )
            self._histograms[name] = PrometheusHistogram(metric, labelnames)
        return self._histograms[name]


def start_metrics_server(bind: str, provider: PrometheusMetricsProvider) -> None:
    host, port_str = bind.rsplit(":", 1)
    port = int(port_str)
    start_http_server(port, addr=host, registry=provider.registry)


# Default histogram buckets for common use cases
DEFAULT_LATENCY_BUCKETS = [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10]
DEFAULT_TX_CONFIRMATION_BUCKETS = [10, 30, 60, 120, 300, 600, 1800, 3600]
DEFAULT_BLOCK_PROCESSING_BUCKETS = [0.01, 0.05, 0.1, 0.25, 0.5, 1, 2, 5]

# Histogram bucket overrides for known metrics.
HISTOGRAM_BUCKETS = {
    "brawny_block_processing_seconds": DEFAULT_BLOCK_PROCESSING_BUCKETS,
    "brawny_tx_confirmation_seconds": DEFAULT_TX_CONFIRMATION_BUCKETS,
    "brawny_rpc_request_seconds": DEFAULT_LATENCY_BUCKETS,
    "brawny_job_check_seconds": DEFAULT_LATENCY_BUCKETS,
    "brawny_broadcast_latency_seconds": DEFAULT_LATENCY_BUCKETS,
}


def _resolve_histogram_buckets(name: str, buckets: list[float] | None) -> list[float] | None:
    if buckets is not None:
        return buckets
    return HISTOGRAM_BUCKETS.get(name)


# Global metrics provider
_metrics_provider: MetricsProvider | None = None


def set_metrics_provider(provider: MetricsProvider) -> None:
    """Set the global metrics provider.

    Args:
        provider: MetricsProvider implementation
    """
    global _metrics_provider
    _metrics_provider = provider


def get_metrics() -> MetricsProvider:
    """Get the global metrics provider.

    Returns NoOpMetricsProvider if not configured.

    Returns:
        The configured MetricsProvider
    """
    global _metrics_provider
    if _metrics_provider is None:
        _metrics_provider = NoOpMetricsProvider()
    return _metrics_provider


# =========================================================================
# Pre-defined metrics for brawny
# =========================================================================

# Counters
BLOCKS_PROCESSED = "brawny_blocks_processed_total"
JOBS_TRIGGERED = "brawny_jobs_triggered_total"
INTENTS_CREATED = "brawny_intents_created_total"
INTENT_TRANSITIONS = "brawny_intent_transitions_total"
INTENT_RETRY_ATTEMPTS = "brawny_intent_retry_attempts_total"
INTENT_CLAIMED = "brawny_intent_claimed_total"
INTENT_RELEASED = "brawny_intent_released_total"
INTENT_STATE_INCONSISTENT = "brawny_intent_state_inconsistent_total"
INTENT_SENDING_STUCK = "brawny_intent_sending_stuck_total"
TX_BROADCAST = "brawny_tx_broadcast_total"
TX_CONFIRMED = "brawny_tx_confirmed_total"
TX_FAILED = "brawny_tx_failed_total"
TX_REPLACED = "brawny_tx_replaced_total"
RPC_REQUESTS = "brawny_rpc_requests_total"
RPC_ERRORS = "brawny_rpc_errors_total"
RPC_REQUESTS_BY_JOB = "brawny_rpc_requests_by_job_total"
RPC_RATE_LIMITED = "brawny_rpc_rate_limited_total"
RPC_FAILOVERS = "brawny_rpc_failovers_total"
ALERTS_SENT = "brawny_alerts_sent_total"
JOB_CHECK_TIMEOUTS = "brawny_job_check_timeouts_total"
JOB_BUILD_TIMEOUTS = "brawny_job_build_timeouts_total"
REORGS_DETECTED = "brawny_reorg_detected_total"
DB_CIRCUIT_BREAKER_OPEN = "brawny_db_circuit_breaker_open_total"
SIMULATION_REVERTED = "brawny_simulation_reverted_total"
SIMULATION_NETWORK_ERRORS = "brawny_simulation_network_errors_total"
SIMULATION_RETRIES = "brawny_simulation_retries_total"
BROADCAST_ATTEMPTS = "brawny_broadcast_attempts_total"
NONCE_SERIALIZATION_RETRIES = "brawny_nonce_serialization_retries_total"
ATTEMPT_WRITE_FAILURES = "brawny_attempt_write_failures_total"

# Gauges
LAST_PROCESSED_BLOCK = "brawny_last_processed_block"
PENDING_INTENTS = "brawny_pending_intents"
INTENTS_BACKING_OFF = "brawny_intents_backing_off"
ACTIVE_WORKERS = "brawny_active_workers"
RPC_ENDPOINT_HEALTH = "brawny_rpc_endpoint_health"
DB_CIRCUIT_BREAKER_STATE = "brawny_db_circuit_breaker_open"

# Stuckness metrics (for "alive but not progressing" alerts)
# See LOGGING_METRICS_PLAN.md Section 4.1.4
OLDEST_PENDING_INTENT_AGE_SECONDS = "brawny_oldest_pending_intent_age_seconds"
LAST_BLOCK_PROCESSED_TIMESTAMP = "brawny_last_block_processed_timestamp"
LAST_BLOCK_TIMESTAMP = "brawny_last_block_timestamp"
BLOCK_PROCESSING_LAG_SECONDS = "brawny_block_processing_lag_seconds"
LAST_INTENT_COMPLETED_TIMESTAMP = "brawny_last_intent_completed_timestamp"
LAST_TX_CONFIRMED_TIMESTAMP = "brawny_last_tx_confirmed_timestamp"
LAST_INTENT_CREATED_TIMESTAMP = "brawny_last_intent_created_timestamp"

# Invariant gauges (Phase 2)
# These should be 0 in a healthy system - non-zero indicates issues
INVARIANT_STUCK_CLAIMED = "brawny_invariant_stuck_claimed"
INVARIANT_NONCE_GAP_AGE = "brawny_invariant_nonce_gap_age_seconds"
INVARIANT_PENDING_NO_ATTEMPTS = "brawny_invariant_pending_no_attempts"
INVARIANT_ORPHANED_CLAIMS = "brawny_invariant_orphaned_claims"
INVARIANT_ORPHANED_NONCES = "brawny_invariant_orphaned_nonces"

# Histograms
BLOCK_PROCESSING_SECONDS = "brawny_block_processing_seconds"
TX_CONFIRMATION_SECONDS = "brawny_tx_confirmation_seconds"
RPC_REQUEST_SECONDS = "brawny_rpc_request_seconds"
JOB_CHECK_SECONDS = "brawny_job_check_seconds"
BROADCAST_LATENCY_SECONDS = "brawny_broadcast_latency_seconds"

# Metric label schema (fixed, low-cardinality)
METRIC_LABELS = {
    BLOCKS_PROCESSED: ["chain_id"],
    JOBS_TRIGGERED: ["chain_id", "job_id"],
    INTENTS_CREATED: ["chain_id", "job_id"],
    INTENT_TRANSITIONS: ["chain_id", "from_status", "to_status", "reason"],
    INTENT_RETRY_ATTEMPTS: ["chain_id", "reason"],
    INTENT_CLAIMED: ["chain_id"],
    INTENT_RELEASED: ["chain_id", "reason"],
    INTENT_STATE_INCONSISTENT: ["chain_id", "reason"],
    INTENT_SENDING_STUCK: ["chain_id", "age_bucket"],
    TX_BROADCAST: ["chain_id", "job_id"],
    TX_CONFIRMED: ["chain_id", "job_id"],
    TX_FAILED: ["chain_id", "job_id", "reason"],
    TX_REPLACED: ["chain_id", "job_id"],
    RPC_REQUESTS: ["chain_id", "method", "rpc_category", "rpc_host"],
    RPC_ERRORS: ["chain_id", "method", "rpc_category", "rpc_host"],
    RPC_REQUESTS_BY_JOB: ["chain_id", "job_id", "rpc_category"],
    RPC_RATE_LIMITED: ["endpoint"],
    ALERTS_SENT: ["chain_id", "channel"],
    JOB_CHECK_TIMEOUTS: ["chain_id", "job_id"],
    JOB_BUILD_TIMEOUTS: ["chain_id", "job_id"],
    REORGS_DETECTED: ["chain_id"],
    DB_CIRCUIT_BREAKER_OPEN: ["db_backend"],
    SIMULATION_REVERTED: ["chain_id", "job_id"],
    SIMULATION_NETWORK_ERRORS: ["chain_id", "job_id"],
    SIMULATION_RETRIES: ["chain_id", "job_id"],
    BROADCAST_ATTEMPTS: ["chain_id", "job_id", "broadcast_group", "result"],
    NONCE_SERIALIZATION_RETRIES: [],
    ATTEMPT_WRITE_FAILURES: ["stage"],
    LAST_PROCESSED_BLOCK: ["chain_id"],
    PENDING_INTENTS: ["chain_id"],
    INTENTS_BACKING_OFF: ["chain_id"],
    ACTIVE_WORKERS: ["chain_id"],
    RPC_ENDPOINT_HEALTH: ["endpoint"],
    DB_CIRCUIT_BREAKER_STATE: ["db_backend"],
    OLDEST_PENDING_INTENT_AGE_SECONDS: ["chain_id"],
    LAST_BLOCK_PROCESSED_TIMESTAMP: ["chain_id"],
    LAST_BLOCK_TIMESTAMP: ["chain_id"],
    BLOCK_PROCESSING_LAG_SECONDS: ["chain_id"],
    LAST_INTENT_COMPLETED_TIMESTAMP: ["chain_id"],
    LAST_TX_CONFIRMED_TIMESTAMP: ["chain_id"],
    LAST_INTENT_CREATED_TIMESTAMP: ["chain_id"],
    BLOCK_PROCESSING_SECONDS: ["chain_id"],
    TX_CONFIRMATION_SECONDS: ["chain_id"],
    RPC_REQUEST_SECONDS: ["chain_id", "method", "rpc_category", "rpc_host"],
    JOB_CHECK_SECONDS: ["chain_id", "job_id"],
    BROADCAST_LATENCY_SECONDS: ["chain_id", "job_id", "broadcast_group"],
    # Invariants (Phase 2)
    INVARIANT_STUCK_CLAIMED: ["chain_id"],
    INVARIANT_NONCE_GAP_AGE: ["chain_id"],
    INVARIANT_PENDING_NO_ATTEMPTS: ["chain_id"],
    INVARIANT_ORPHANED_CLAIMS: ["chain_id"],
    INVARIANT_ORPHANED_NONCES: ["chain_id"],
}

METRIC_DESCRIPTIONS = {
    BLOCKS_PROCESSED: "Total blocks processed",
    JOBS_TRIGGERED: "Total jobs triggered",
    INTENTS_CREATED: "Total intents created",
    INTENT_TRANSITIONS: "Total intent status transitions",
    INTENT_RETRY_ATTEMPTS: "Total intent retry attempts",
    INTENT_CLAIMED: "Total intents claimed",
    INTENT_RELEASED: "Total intents released",
    INTENT_STATE_INCONSISTENT: "Total inconsistent intent state detections",
    INTENT_SENDING_STUCK: "Total intents detected stuck in sending",
    TX_BROADCAST: "Total transactions broadcast",
    TX_CONFIRMED: "Total transactions confirmed",
    TX_FAILED: "Total transactions failed",
    TX_REPLACED: "Total transactions replaced",
    RPC_REQUESTS: "Total RPC requests",
    RPC_ERRORS: "Total RPC errors (failed attempts)",
    RPC_REQUESTS_BY_JOB: "RPC requests attributed to jobs",
    RPC_RATE_LIMITED: "RPC requests delayed by rate limiting",
    ALERTS_SENT: "Total alerts sent",
    JOB_CHECK_TIMEOUTS: "Total job check timeouts",
    JOB_BUILD_TIMEOUTS: "Total job build_intent timeouts",
    REORGS_DETECTED: "Total reorgs detected",
    DB_CIRCUIT_BREAKER_OPEN: "Database circuit breaker openings",
    SIMULATION_REVERTED: "Total simulation reverts (permanent failures)",
    SIMULATION_NETWORK_ERRORS: "Total simulation network errors (after all retries)",
    SIMULATION_RETRIES: "Total simulation retry attempts",
    BROADCAST_ATTEMPTS: "Total broadcast attempts by result (success, unavailable, fatal, recoverable)",
    NONCE_SERIALIZATION_RETRIES: "Number of serialization conflict retries during nonce reservation",
    ATTEMPT_WRITE_FAILURES: "Number of failed attempt record writes",
    LAST_PROCESSED_BLOCK: "Last processed block",
    PENDING_INTENTS: "Pending intents",
    INTENTS_BACKING_OFF: "Intents in backoff window (retry_after in future)",
    ACTIVE_WORKERS: "Active worker threads",
    RPC_ENDPOINT_HEALTH: "RPC endpoint health (1=healthy, 0=unhealthy)",
    DB_CIRCUIT_BREAKER_STATE: "Database circuit breaker open state (1=open, 0=closed)",
    OLDEST_PENDING_INTENT_AGE_SECONDS: "Age in seconds of oldest pending intent (CREATED, PENDING, CLAIMED, SENDING)",
    LAST_BLOCK_PROCESSED_TIMESTAMP: "Unix timestamp when we last processed a block",
    LAST_BLOCK_TIMESTAMP: "Unix timestamp of the last processed block (chain time)",
    BLOCK_PROCESSING_LAG_SECONDS: "Seconds between block timestamp and processing completion",
    LAST_INTENT_COMPLETED_TIMESTAMP: "Unix timestamp when we last completed an intent",
    LAST_TX_CONFIRMED_TIMESTAMP: "Unix timestamp when we last confirmed a transaction",
    LAST_INTENT_CREATED_TIMESTAMP: "Unix timestamp when we last created an intent",
    BLOCK_PROCESSING_SECONDS: "Block processing duration in seconds",
    TX_CONFIRMATION_SECONDS: "Transaction confirmation duration in seconds",
    RPC_REQUEST_SECONDS: "RPC request duration in seconds",
    JOB_CHECK_SECONDS: "Job check duration in seconds",
    BROADCAST_LATENCY_SECONDS: "Broadcast transaction latency in seconds",
    # Invariants (Phase 2)
    INVARIANT_STUCK_CLAIMED: "Intents stuck in claimed status > threshold minutes",
    INVARIANT_NONCE_GAP_AGE: "Age in seconds of oldest nonce gap (reserved below chain nonce)",
    INVARIANT_PENDING_NO_ATTEMPTS: "Pending intents with no attempt records (data integrity issue)",
    INVARIANT_ORPHANED_CLAIMS: "Intents with claim_token but status != claimed",
    INVARIANT_ORPHANED_NONCES: "Reserved/in_flight nonces for terminal intents",
}
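
A minimal usage sketch, based only on the names defined in this module and not part of the packaged file: a deployment would typically construct a PrometheusMetricsProvider, install it with set_metrics_provider, expose the scrape endpoint via start_metrics_server, and then record the pre-defined metrics through get_metrics(). The bind address, chain_id, and job_id values below are illustrative assumptions.

    from brawny import metrics

    # Wire up the Prometheus-backed provider and expose the scrape endpoint.
    provider = metrics.PrometheusMetricsProvider()
    metrics.set_metrics_provider(provider)
    metrics.start_metrics_server("0.0.0.0:9090", provider)  # example bind address

    # Record against the pre-defined metric names; label keys follow METRIC_LABELS.
    m = metrics.get_metrics()
    m.counter(metrics.TX_CONFIRMED).inc(chain_id=1, job_id="example_job")
    m.gauge(metrics.PENDING_INTENTS).set(3, chain_id=1)
    with m.histogram(metrics.JOB_CHECK_SECONDS).time(chain_id=1, job_id="example_job"):
        pass  # timed work goes here
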
brawny/model/__init__.py ADDED
@@ -0,0 +1,53 @@
"""Core data models, types, enums, and contexts."""

from brawny.model.enums import (
    AttemptStatus,
    IntentStatus,
    NonceStatus,
)
from brawny.model.types import (
    BlockInfo,
    GasParams,
    Trigger,
    TxAttempt,
    TxIntent,
    TxIntentSpec,
)
from brawny.model.contexts import (
    BlockContext,
    CheckContext,
    BuildContext,
    AlertContext,
    ContractFactory,
)
from brawny.model.events import (
    DecodedEvent,
    find_event,
    events_by_name,
    events_by_address,
)

__all__ = [
    # Enums
    "AttemptStatus",
    "IntentStatus",
    "NonceStatus",
    # Types
    "BlockInfo",
    "GasParams",
    "Trigger",
    "TxAttempt",
    "TxIntent",
    "TxIntentSpec",
    # Contexts (OE7)
    "BlockContext",
    "CheckContext",
    "BuildContext",
    "AlertContext",
    "ContractFactory",
    # Events
    "DecodedEvent",
    "find_event",
    "events_by_name",
    "events_by_address",
]
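
An import sketch, illustrative rather than part of the packaged file: because this __init__ re-exports the model names, downstream job code can import them from brawny.model directly instead of reaching into the individual submodules.

    from brawny.model import DecodedEvent, IntentStatus, TxIntent, find_event

    # Equivalent to the submodule paths above, e.g.:
    # from brawny.model.enums import IntentStatus
    # from brawny.model.events import DecodedEvent, find_event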