brawny 0.1.13__py3-none-any.whl → 0.1.22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- brawny/__init__.py +2 -0
- brawny/_context.py +5 -5
- brawny/_rpc/__init__.py +36 -12
- brawny/_rpc/broadcast.py +14 -13
- brawny/_rpc/caller.py +243 -0
- brawny/_rpc/client.py +539 -0
- brawny/_rpc/clients.py +11 -11
- brawny/_rpc/context.py +23 -0
- brawny/_rpc/errors.py +465 -31
- brawny/_rpc/gas.py +7 -6
- brawny/_rpc/pool.py +18 -0
- brawny/_rpc/retry.py +266 -0
- brawny/_rpc/retry_policy.py +81 -0
- brawny/accounts.py +28 -9
- brawny/alerts/__init__.py +15 -18
- brawny/alerts/abi_resolver.py +212 -36
- brawny/alerts/base.py +2 -2
- brawny/alerts/contracts.py +77 -10
- brawny/alerts/errors.py +30 -3
- brawny/alerts/events.py +38 -5
- brawny/alerts/health.py +19 -13
- brawny/alerts/send.py +513 -55
- brawny/api.py +39 -11
- brawny/assets/AGENTS.md +325 -0
- brawny/async_runtime.py +48 -0
- brawny/chain.py +3 -3
- brawny/cli/commands/__init__.py +2 -0
- brawny/cli/commands/console.py +69 -19
- brawny/cli/commands/contract.py +2 -2
- brawny/cli/commands/controls.py +121 -0
- brawny/cli/commands/health.py +2 -2
- brawny/cli/commands/job_dev.py +6 -5
- brawny/cli/commands/jobs.py +99 -2
- brawny/cli/commands/maintenance.py +13 -29
- brawny/cli/commands/migrate.py +1 -0
- brawny/cli/commands/run.py +10 -3
- brawny/cli/commands/script.py +8 -3
- brawny/cli/commands/signer.py +143 -26
- brawny/cli/helpers.py +0 -3
- brawny/cli_templates.py +25 -349
- brawny/config/__init__.py +4 -1
- brawny/config/models.py +43 -57
- brawny/config/parser.py +268 -57
- brawny/config/validation.py +52 -15
- brawny/daemon/context.py +4 -2
- brawny/daemon/core.py +185 -63
- brawny/daemon/loops.py +166 -98
- brawny/daemon/supervisor.py +261 -0
- brawny/db/__init__.py +14 -26
- brawny/db/base.py +248 -151
- brawny/db/global_cache.py +11 -1
- brawny/db/migrate.py +175 -28
- brawny/db/migrations/001_init.sql +4 -3
- brawny/db/migrations/010_add_nonce_gap_index.sql +1 -1
- brawny/db/migrations/011_add_job_logs.sql +1 -2
- brawny/db/migrations/012_add_claimed_by.sql +2 -2
- brawny/db/migrations/013_attempt_unique.sql +10 -0
- brawny/db/migrations/014_add_lease_expires_at.sql +5 -0
- brawny/db/migrations/015_add_signer_alias.sql +14 -0
- brawny/db/migrations/016_runtime_controls_and_quarantine.sql +32 -0
- brawny/db/migrations/017_add_job_drain.sql +6 -0
- brawny/db/migrations/018_add_nonce_reset_audit.sql +20 -0
- brawny/db/migrations/019_add_job_cooldowns.sql +8 -0
- brawny/db/migrations/020_attempt_unique_initial.sql +7 -0
- brawny/db/ops/__init__.py +3 -25
- brawny/db/ops/logs.py +1 -2
- brawny/db/queries.py +47 -91
- brawny/db/serialized.py +65 -0
- brawny/db/sqlite/__init__.py +1001 -0
- brawny/db/sqlite/connection.py +231 -0
- brawny/db/sqlite/execute.py +116 -0
- brawny/db/sqlite/mappers.py +190 -0
- brawny/db/sqlite/repos/attempts.py +372 -0
- brawny/db/sqlite/repos/block_state.py +102 -0
- brawny/db/sqlite/repos/cache.py +104 -0
- brawny/db/sqlite/repos/intents.py +1021 -0
- brawny/db/sqlite/repos/jobs.py +200 -0
- brawny/db/sqlite/repos/maintenance.py +182 -0
- brawny/db/sqlite/repos/signers_nonces.py +566 -0
- brawny/db/sqlite/tx.py +119 -0
- brawny/http.py +194 -0
- brawny/invariants.py +11 -24
- brawny/jobs/base.py +8 -0
- brawny/jobs/job_validation.py +2 -1
- brawny/keystore.py +83 -7
- brawny/lifecycle.py +64 -12
- brawny/logging.py +0 -2
- brawny/metrics.py +84 -12
- brawny/model/contexts.py +111 -9
- brawny/model/enums.py +1 -0
- brawny/model/errors.py +18 -0
- brawny/model/types.py +47 -131
- brawny/network_guard.py +133 -0
- brawny/networks/__init__.py +5 -5
- brawny/networks/config.py +1 -7
- brawny/networks/manager.py +14 -11
- brawny/runtime_controls.py +74 -0
- brawny/scheduler/poller.py +11 -7
- brawny/scheduler/reorg.py +95 -39
- brawny/scheduler/runner.py +442 -168
- brawny/scheduler/shutdown.py +3 -3
- brawny/script_tx.py +3 -3
- brawny/telegram.py +53 -7
- brawny/testing.py +1 -0
- brawny/timeout.py +38 -0
- brawny/tx/executor.py +922 -308
- brawny/tx/intent.py +54 -16
- brawny/tx/monitor.py +31 -12
- brawny/tx/nonce.py +212 -90
- brawny/tx/replacement.py +69 -18
- brawny/tx/retry_policy.py +24 -0
- brawny/tx/stages/types.py +75 -0
- brawny/types.py +18 -0
- brawny/utils.py +41 -0
- {brawny-0.1.13.dist-info → brawny-0.1.22.dist-info}/METADATA +3 -3
- brawny-0.1.22.dist-info/RECORD +163 -0
- brawny/_rpc/manager.py +0 -982
- brawny/_rpc/selector.py +0 -156
- brawny/db/base_new.py +0 -165
- brawny/db/mappers.py +0 -182
- brawny/db/migrations/008_add_transactions.sql +0 -72
- brawny/db/ops/attempts.py +0 -108
- brawny/db/ops/blocks.py +0 -83
- brawny/db/ops/cache.py +0 -93
- brawny/db/ops/intents.py +0 -296
- brawny/db/ops/jobs.py +0 -110
- brawny/db/ops/nonces.py +0 -322
- brawny/db/postgres.py +0 -2535
- brawny/db/postgres_new.py +0 -196
- brawny/db/sqlite.py +0 -2733
- brawny/db/sqlite_new.py +0 -191
- brawny-0.1.13.dist-info/RECORD +0 -141
- {brawny-0.1.13.dist-info → brawny-0.1.22.dist-info}/WHEEL +0 -0
- {brawny-0.1.13.dist-info → brawny-0.1.22.dist-info}/entry_points.txt +0 -0
- {brawny-0.1.13.dist-info → brawny-0.1.22.dist-info}/top_level.txt +0 -0
brawny/model/types.py
CHANGED
|
@@ -16,8 +16,7 @@ JSONValue = str | int | float | bool | None | list["JSONValue"] | dict[str, "JSO
|
|
|
16
16
|
# Hook names for type-safe dispatch
|
|
17
17
|
HookName = Literal["on_trigger", "on_success", "on_failure"]
|
|
18
18
|
|
|
19
|
-
from brawny.model.enums import AttemptStatus, IntentStatus, NonceStatus
|
|
20
|
-
from brawny.model.errors import FailureType
|
|
19
|
+
from brawny.model.enums import AttemptStatus, IntentStatus, NonceStatus
|
|
21
20
|
|
|
22
21
|
|
|
23
22
|
def to_wei(value: int | float | str) -> int:
|
|
@@ -96,10 +95,19 @@ class BlockInfo:
|
|
|
96
95
|
block_number: int
|
|
97
96
|
block_hash: str
|
|
98
97
|
timestamp: int
|
|
98
|
+
base_fee: int = 0
|
|
99
99
|
|
|
100
100
|
def __post_init__(self) -> None:
|
|
101
101
|
if not self.block_hash.startswith("0x"):
|
|
102
102
|
object.__setattr__(self, "block_hash", f"0x{self.block_hash}")
|
|
103
|
+
base_fee = self.base_fee
|
|
104
|
+
if base_fee is None:
|
|
105
|
+
base_fee = 0
|
|
106
|
+
elif isinstance(base_fee, str):
|
|
107
|
+
base_fee = int(base_fee, 16) if base_fee.startswith("0x") else int(base_fee)
|
|
108
|
+
else:
|
|
109
|
+
base_fee = int(base_fee)
|
|
110
|
+
object.__setattr__(self, "base_fee", base_fee)
|
|
103
111
|
|
|
104
112
|
|
|
105
113
|
@dataclass
|
|
@@ -154,12 +162,16 @@ class TxIntent:
|
|
|
154
162
|
claimed_at: datetime | None
|
|
155
163
|
created_at: datetime
|
|
156
164
|
updated_at: datetime
|
|
165
|
+
claimed_by: str | None = None
|
|
166
|
+
lease_expires_at: datetime | None = None
|
|
167
|
+
signer_alias: str | None = None
|
|
157
168
|
retry_count: int = 0
|
|
158
169
|
|
|
159
170
|
# Broadcast binding (set on first successful broadcast)
|
|
160
171
|
# These fields preserve the privacy invariant: retries use the SAME endpoints
|
|
161
172
|
broadcast_group: str | None = None
|
|
162
173
|
broadcast_endpoints_json: str | None = None
|
|
174
|
+
broadcast_binding_id: UUID | None = None
|
|
163
175
|
|
|
164
176
|
# Per-intent context for alerts (parsed dict, not JSON string)
|
|
165
177
|
metadata: dict[str, JSONValue] = field(default_factory=dict)
|
|
@@ -240,6 +252,7 @@ class TxAttempt:
|
|
|
240
252
|
# Audit trail (which group and endpoint were used for this attempt)
|
|
241
253
|
broadcast_group: str | None = None
|
|
242
254
|
endpoint_url: str | None = None
|
|
255
|
+
endpoint_binding_id: UUID | None = None
|
|
243
256
|
|
|
244
257
|
|
|
245
258
|
@dataclass
|
|
@@ -272,135 +285,6 @@ class BroadcastInfo:
|
|
|
272
285
|
)
|
|
273
286
|
|
|
274
287
|
|
|
275
|
-
@dataclass
|
|
276
|
-
class TxHashRecord:
|
|
277
|
-
"""Record of a single broadcast attempt, stored in tx_hash_history JSON.
|
|
278
|
-
|
|
279
|
-
This is append-only archival data for debugging and postmortems.
|
|
280
|
-
NEVER query this in normal flows.
|
|
281
|
-
"""
|
|
282
|
-
|
|
283
|
-
tx_hash: str
|
|
284
|
-
nonce: int
|
|
285
|
-
broadcast_at: str # ISO timestamp
|
|
286
|
-
broadcast_block: int | None
|
|
287
|
-
gas_limit: int
|
|
288
|
-
max_fee_per_gas: int
|
|
289
|
-
max_priority_fee_per_gas: int
|
|
290
|
-
reason: str # "initial", "replacement", "fee_bump"
|
|
291
|
-
outcome: str | None = None # "confirmed", "replaced", "failed", None (pending)
|
|
292
|
-
|
|
293
|
-
def to_dict(self) -> dict[str, Any]:
|
|
294
|
-
"""Convert to dict for JSON serialization."""
|
|
295
|
-
return {
|
|
296
|
-
"tx_hash": self.tx_hash,
|
|
297
|
-
"nonce": self.nonce,
|
|
298
|
-
"broadcast_at": self.broadcast_at,
|
|
299
|
-
"broadcast_block": self.broadcast_block,
|
|
300
|
-
"gas_limit": self.gas_limit,
|
|
301
|
-
"max_fee_per_gas": self.max_fee_per_gas,
|
|
302
|
-
"max_priority_fee_per_gas": self.max_priority_fee_per_gas,
|
|
303
|
-
"reason": self.reason,
|
|
304
|
-
"outcome": self.outcome,
|
|
305
|
-
}
|
|
306
|
-
|
|
307
|
-
@classmethod
|
|
308
|
-
def from_dict(cls, data: dict[str, Any]) -> "TxHashRecord":
|
|
309
|
-
"""Create from dict."""
|
|
310
|
-
return cls(
|
|
311
|
-
tx_hash=data["tx_hash"],
|
|
312
|
-
nonce=data["nonce"],
|
|
313
|
-
broadcast_at=data["broadcast_at"],
|
|
314
|
-
broadcast_block=data.get("broadcast_block"),
|
|
315
|
-
gas_limit=data["gas_limit"],
|
|
316
|
-
max_fee_per_gas=data["max_fee_per_gas"],
|
|
317
|
-
max_priority_fee_per_gas=data["max_priority_fee_per_gas"],
|
|
318
|
-
reason=data["reason"],
|
|
319
|
-
outcome=data.get("outcome"),
|
|
320
|
-
)
|
|
321
|
-
|
|
322
|
-
|
|
323
|
-
@dataclass
|
|
324
|
-
class Transaction:
|
|
325
|
-
"""Single model representing a job transaction through its full lifecycle.
|
|
326
|
-
|
|
327
|
-
IMPORTANT: Transaction is the only durable execution model.
|
|
328
|
-
Do not add attempt tables.
|
|
329
|
-
|
|
330
|
-
This replaces the old TxIntent + TxAttempt dual model with a single
|
|
331
|
-
row per transaction. Replacement history is preserved in tx_hash_history
|
|
332
|
-
JSON field (append-only, for debugging only).
|
|
333
|
-
"""
|
|
334
|
-
|
|
335
|
-
# Identity (queryable)
|
|
336
|
-
tx_id: UUID # Primary key
|
|
337
|
-
job_id: str
|
|
338
|
-
chain_id: int
|
|
339
|
-
idempotency_key: str # UNIQUE - prevents duplicates
|
|
340
|
-
|
|
341
|
-
# Transaction payload (immutable after creation)
|
|
342
|
-
signer_address: str
|
|
343
|
-
to_address: str
|
|
344
|
-
data: str | None
|
|
345
|
-
value_wei: str
|
|
346
|
-
min_confirmations: int
|
|
347
|
-
deadline_ts: datetime | None
|
|
348
|
-
|
|
349
|
-
# Current state (queryable)
|
|
350
|
-
status: TxStatus # CREATED → BROADCAST → CONFIRMED/FAILED
|
|
351
|
-
failure_type: FailureType | None
|
|
352
|
-
|
|
353
|
-
# Broadcast state (queryable)
|
|
354
|
-
current_tx_hash: str | None # Active tx hash being monitored
|
|
355
|
-
current_nonce: int | None # Nonce for current broadcast
|
|
356
|
-
replacement_count: int # 0 = first attempt, 1+ = replacements
|
|
357
|
-
|
|
358
|
-
# Worker coordination (queryable)
|
|
359
|
-
claim_token: str | None
|
|
360
|
-
claimed_at: datetime | None
|
|
361
|
-
|
|
362
|
-
# Confirmation (queryable)
|
|
363
|
-
included_block: int | None
|
|
364
|
-
confirmed_at: datetime | None
|
|
365
|
-
|
|
366
|
-
# Audit (queryable)
|
|
367
|
-
created_at: datetime
|
|
368
|
-
updated_at: datetime
|
|
369
|
-
|
|
370
|
-
# --- JSON BLOBS (rarely queried) ---
|
|
371
|
-
|
|
372
|
-
# Gas params for current/next attempt
|
|
373
|
-
gas_params_json: str | None = None # {"gas_limit": N, "max_fee": N, "priority_fee": N}
|
|
374
|
-
|
|
375
|
-
# Broadcast binding (privacy invariant)
|
|
376
|
-
broadcast_info_json: str | None = None # {"group": str, "endpoints": [...]}
|
|
377
|
-
|
|
378
|
-
# Error details (debugging only)
|
|
379
|
-
error_info_json: str | None = None # ErrorInfo as JSON
|
|
380
|
-
|
|
381
|
-
# Broadcast history (append-only, debugging only)
|
|
382
|
-
tx_hash_history: str | None = None # JSON array of TxHashRecord
|
|
383
|
-
|
|
384
|
-
@property
|
|
385
|
-
def gas_params(self) -> GasParams | None:
|
|
386
|
-
"""Get gas params from JSON."""
|
|
387
|
-
if self.gas_params_json is None:
|
|
388
|
-
return None
|
|
389
|
-
return GasParams.from_json(self.gas_params_json)
|
|
390
|
-
|
|
391
|
-
@property
|
|
392
|
-
def broadcast_info(self) -> BroadcastInfo | None:
|
|
393
|
-
"""Get broadcast info from JSON."""
|
|
394
|
-
return BroadcastInfo.from_json(self.broadcast_info_json)
|
|
395
|
-
|
|
396
|
-
def get_hash_history(self) -> list[TxHashRecord]:
|
|
397
|
-
"""Get tx hash history from JSON. For debugging only."""
|
|
398
|
-
if self.tx_hash_history is None:
|
|
399
|
-
return []
|
|
400
|
-
records = json.loads(self.tx_hash_history)
|
|
401
|
-
return [TxHashRecord.from_dict(r) for r in records]
|
|
402
|
-
|
|
403
|
-
|
|
404
288
|
@dataclass
|
|
405
289
|
class NonceReservation:
|
|
406
290
|
"""Nonce reservation record."""
|
|
@@ -427,6 +311,36 @@ class SignerState:
|
|
|
427
311
|
updated_at: datetime
|
|
428
312
|
gap_started_at: datetime | None = None # When nonce gap blocking started (for alerts)
|
|
429
313
|
alias: str | None = None # Optional human-readable alias
|
|
314
|
+
quarantined_at: datetime | None = None
|
|
315
|
+
quarantine_reason: str | None = None
|
|
316
|
+
replacements_paused: bool = False
|
|
317
|
+
|
|
318
|
+
|
|
319
|
+
@dataclass
|
|
320
|
+
class RuntimeControl:
|
|
321
|
+
"""Runtime containment control with TTL."""
|
|
322
|
+
|
|
323
|
+
control: str
|
|
324
|
+
active: bool
|
|
325
|
+
expires_at: datetime | None
|
|
326
|
+
reason: str | None
|
|
327
|
+
actor: str | None
|
|
328
|
+
mode: str
|
|
329
|
+
updated_at: datetime
|
|
330
|
+
|
|
331
|
+
|
|
332
|
+
@dataclass
|
|
333
|
+
class MutationAudit:
|
|
334
|
+
"""Durable mutation audit record."""
|
|
335
|
+
|
|
336
|
+
entity_type: str
|
|
337
|
+
entity_id: str
|
|
338
|
+
action: str
|
|
339
|
+
actor: str | None
|
|
340
|
+
reason: str | None
|
|
341
|
+
source: str | None
|
|
342
|
+
metadata_json: str | None
|
|
343
|
+
created_at: datetime
|
|
430
344
|
|
|
431
345
|
|
|
432
346
|
@dataclass
|
|
@@ -439,6 +353,8 @@ class JobConfig:
|
|
|
439
353
|
check_interval_blocks: int
|
|
440
354
|
last_checked_block_number: int | None
|
|
441
355
|
last_triggered_block_number: int | None
|
|
356
|
+
drain_until: datetime | None
|
|
357
|
+
drain_reason: str | None
|
|
442
358
|
created_at: datetime
|
|
443
359
|
updated_at: datetime
|
|
444
360
|
|
brawny/network_guard.py
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
1
|
+
"""Runtime network guard for job execution contexts."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import inspect
|
|
6
|
+
import socket
|
|
7
|
+
import threading
|
|
8
|
+
from contextlib import contextmanager
|
|
9
|
+
from contextvars import ContextVar
|
|
10
|
+
from typing import Iterator
|
|
11
|
+
|
|
12
|
+
from brawny.logging import get_logger
|
|
13
|
+
from brawny.metrics import get_metrics, NETWORK_GUARD_ALLOW, NETWORK_GUARD_VIOLATION
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
_guard_depth = 0
|
|
17
|
+
_guard_context_counts: dict[str, int] = {}
|
|
18
|
+
_guard_lock = threading.RLock()
|
|
19
|
+
_allow_network: ContextVar[bool] = ContextVar("brawny_allow_network", default=False)
|
|
20
|
+
_allow_reason: ContextVar[str | None] = ContextVar("brawny_allow_reason", default=None)
|
|
21
|
+
_patched = False
|
|
22
|
+
|
|
23
|
+
_ALLOWED_REASONS = frozenset({"rpc", "alerts", "approved_http_client"})
|
|
24
|
+
|
|
25
|
+
logger = get_logger(__name__)
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def install_network_guard() -> None:
    """Install the socket-level network guard (idempotent).

    Monkeypatches ``socket.socket.connect`` and ``socket.create_connection``
    so that, while a guard context is active (see ``job_network_guard``) and
    network access has not been explicitly allowed (see
    ``allow_network_calls``), any direct connection attempt is recorded as a
    violation and rejected with ``RuntimeError``.

    Safe to call multiple times: the patch is applied at most once per
    process, guarded by the module-level ``_patched`` flag.
    """
    global _patched
    if _patched:
        return

    # Capture the unpatched callables so the guarded wrappers can delegate.
    original_connect = socket.socket.connect
    original_create_connection = socket.create_connection

    def _deny_if_blocked() -> None:
        # Shared check for both patched entry points: an active guard with no
        # allow_network_calls() override means this attempt is a violation.
        if _guard_is_active() and not _allow_network.get():
            _record_violation()
            raise RuntimeError(
                "Direct network call blocked. Use ctx.rpc or ctx.http (approved clients)."
            )

    def guarded_connect(sock: socket.socket, address: object) -> object:
        _deny_if_blocked()
        return original_connect(sock, address)

    def guarded_create_connection(*args: object, **kwargs: object) -> socket.socket:
        _deny_if_blocked()
        return original_create_connection(*args, **kwargs)

    socket.socket.connect = guarded_connect  # type: ignore[assignment]
    socket.create_connection = guarded_create_connection  # type: ignore[assignment]
    _patched = True
|
56
|
+
|
|
57
|
+
|
|
58
|
+
def _guard_is_active() -> bool:
    """Return True while at least one guard context is currently entered."""
    with _guard_lock:
        return bool(_guard_depth)
|
61
|
+
|
|
62
|
+
|
|
63
|
+
def _current_context() -> str:
    """Best-effort label for the active guard context, for telemetry.

    Prefers "job" if a job context is active; otherwise picks an arbitrary
    active context, falling back to "unknown" when none are registered.
    """
    with _guard_lock:
        counts = _guard_context_counts
        if "job" in counts:
            return "job"
        for name in counts:
            return name
        return "unknown"
|
70
|
+
|
|
71
|
+
|
|
72
|
+
def _caller_module() -> str:
|
|
73
|
+
for frame_info in inspect.stack()[2:]:
|
|
74
|
+
module = inspect.getmodule(frame_info.frame)
|
|
75
|
+
if module is None:
|
|
76
|
+
continue
|
|
77
|
+
name = module.__name__
|
|
78
|
+
if name.startswith(("socket", "httpx", "requests", "urllib", "ssl", "asyncio")):
|
|
79
|
+
continue
|
|
80
|
+
if name.startswith("brawny"):
|
|
81
|
+
continue
|
|
82
|
+
return name
|
|
83
|
+
return "unknown"
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
def _record_violation() -> None:
    """Emit a metric and a warning log entry for a blocked network attempt."""
    context = _current_context()
    caller = _caller_module()
    get_metrics().counter(NETWORK_GUARD_VIOLATION).inc(
        context=context,
        caller_module=caller,
    )
    logger.warning(
        "network_guard.violation",
        context=context,
        caller_module=caller,
    )
|
97
|
+
|
|
98
|
+
|
|
99
|
+
@contextmanager
def job_network_guard(context: str = "job") -> Iterator[None]:
    """Enable the network guard for the duration of a job execution context.

    Re-entrant: nested uses stack via a depth counter, and per-context
    counts let telemetry attribute violations to the right context.
    """
    install_network_guard()
    global _guard_depth
    with _guard_lock:
        _guard_depth += 1
        _guard_context_counts[context] = _guard_context_counts.get(context, 0) + 1
    try:
        yield
    finally:
        with _guard_lock:
            remaining = _guard_context_counts.get(context, 0) - 1
            if remaining > 0:
                _guard_context_counts[context] = remaining
            else:
                _guard_context_counts.pop(context, None)
            _guard_depth -= 1
|
117
|
+
|
|
118
|
+
|
|
119
|
+
@contextmanager
def allow_network_calls(reason: str) -> Iterator[None]:
    """Temporarily allow network calls within a guarded context.

    Raises:
        ValueError: if *reason* is not one of the approved allow reasons.
    """
    if reason not in _ALLOWED_REASONS:
        raise ValueError(f"Invalid allow_network reason: {reason}")
    get_metrics().counter(NETWORK_GUARD_ALLOW).inc(reason=reason)
    logger.debug("network_guard.allow", reason=reason)
    allow_token = _allow_network.set(True)
    reason_token = _allow_reason.set(reason)
    try:
        yield
    finally:
        # Restore in reverse order of setting.
        _allow_reason.reset(reason_token)
        _allow_network.reset(allow_token)
brawny/networks/__init__.py
CHANGED
|
@@ -21,7 +21,7 @@ from typing import TYPE_CHECKING
|
|
|
21
21
|
from brawny.networks.manager import _get_manager
|
|
22
22
|
|
|
23
23
|
if TYPE_CHECKING:
|
|
24
|
-
from brawny._rpc import
|
|
24
|
+
from brawny._rpc.clients import ReadClient
|
|
25
25
|
|
|
26
26
|
|
|
27
27
|
class _NetworkProxy:
|
|
@@ -57,12 +57,12 @@ class _NetworkProxy:
|
|
|
57
57
|
return _get_manager().list_networks()
|
|
58
58
|
|
|
59
59
|
@property
|
|
60
|
-
def rpc(self) ->
|
|
61
|
-
"""Get underlying
|
|
60
|
+
def rpc(self) -> ReadClient | None:
|
|
61
|
+
"""Get underlying read client."""
|
|
62
62
|
return _get_manager().rpc
|
|
63
63
|
|
|
64
|
-
def rpc_required(self) ->
|
|
65
|
-
"""Get
|
|
64
|
+
def rpc_required(self) -> ReadClient:
|
|
65
|
+
"""Get read client, raising error if not connected.
|
|
66
66
|
|
|
67
67
|
Use this instead of checking `if network.rpc is None` everywhere.
|
|
68
68
|
|
brawny/networks/config.py
CHANGED
|
@@ -94,12 +94,9 @@ class NetworkConfig:
|
|
|
94
94
|
multicall2: str | None = None # Passed to Contract layer for batch calls
|
|
95
95
|
timeout: int = 30
|
|
96
96
|
|
|
97
|
-
# RPC settings (passed to
|
|
97
|
+
# RPC settings (passed to ReadClient for production-grade handling)
|
|
98
98
|
max_retries: int = 3
|
|
99
99
|
retry_backoff_base: float = 1.0
|
|
100
|
-
circuit_breaker_seconds: int = 300
|
|
101
|
-
rate_limit_per_second: float | None = None
|
|
102
|
-
rate_limit_burst: int | None = None
|
|
103
100
|
|
|
104
101
|
# Development network fields (None for live networks)
|
|
105
102
|
cmd: str | None = None
|
|
@@ -201,9 +198,6 @@ def _parse_networks(data: dict) -> dict[str, NetworkConfig]:
|
|
|
201
198
|
timeout=net.get("timeout", 30),
|
|
202
199
|
max_retries=net.get("max_retries", 3),
|
|
203
200
|
retry_backoff_base=net.get("retry_backoff_base", 1.0),
|
|
204
|
-
circuit_breaker_seconds=net.get("circuit_breaker_seconds", 300),
|
|
205
|
-
rate_limit_per_second=net.get("rate_limit_per_second"),
|
|
206
|
-
rate_limit_burst=net.get("rate_limit_burst"),
|
|
207
201
|
)
|
|
208
202
|
|
|
209
203
|
# Parse development networks (fork resolution happens at connect time)
|
brawny/networks/manager.py
CHANGED
|
@@ -19,7 +19,7 @@ from typing import TYPE_CHECKING
|
|
|
19
19
|
from brawny.networks.config import NetworkConfig, load_networks
|
|
20
20
|
|
|
21
21
|
if TYPE_CHECKING:
|
|
22
|
-
from brawny._rpc import
|
|
22
|
+
from brawny._rpc.clients import ReadClient
|
|
23
23
|
|
|
24
24
|
|
|
25
25
|
def _get_pidfile_dir() -> Path:
|
|
@@ -49,7 +49,7 @@ class NetworkManager:
|
|
|
49
49
|
def __init__(self) -> None:
|
|
50
50
|
self._networks: dict[str, NetworkConfig] | None = None
|
|
51
51
|
self._active: NetworkConfig | None = None
|
|
52
|
-
self._rpc:
|
|
52
|
+
self._rpc: ReadClient | None = None
|
|
53
53
|
self._rpc_process: subprocess.Popen | None = None
|
|
54
54
|
self._rpc_process_network_id: str | None = None # Track which network we started
|
|
55
55
|
self._chain_id: int | None = None # Cached after first lookup
|
|
@@ -161,7 +161,7 @@ class NetworkManager:
|
|
|
161
161
|
# Resolve fork reference at connect time
|
|
162
162
|
self._resolve_fork(config, networks)
|
|
163
163
|
|
|
164
|
-
# Get endpoints (list) -
|
|
164
|
+
# Get endpoints (list) - read client handles failover automatically
|
|
165
165
|
endpoints = config.get_endpoints()
|
|
166
166
|
|
|
167
167
|
# Launch RPC for development networks
|
|
@@ -179,17 +179,20 @@ class NetworkManager:
|
|
|
179
179
|
# Dev networks only use local endpoint
|
|
180
180
|
endpoints = [local_url]
|
|
181
181
|
|
|
182
|
-
# Create
|
|
183
|
-
from brawny._rpc import
|
|
182
|
+
# Create read client with full configuration (preserves brawny's advantages)
|
|
183
|
+
from brawny._rpc.clients import ReadClient
|
|
184
|
+
from brawny._rpc.retry_policy import policy_from_values
|
|
184
185
|
|
|
185
|
-
self._rpc =
|
|
186
|
+
self._rpc = ReadClient(
|
|
186
187
|
endpoints=endpoints,
|
|
187
188
|
timeout_seconds=float(config.timeout),
|
|
188
189
|
max_retries=config.max_retries,
|
|
189
190
|
retry_backoff_base=config.retry_backoff_base,
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
191
|
+
retry_policy=policy_from_values(
|
|
192
|
+
"FAST_READ",
|
|
193
|
+
max_attempts=config.max_retries,
|
|
194
|
+
base_backoff_seconds=config.retry_backoff_base,
|
|
195
|
+
),
|
|
193
196
|
chain_id=config.chainid,
|
|
194
197
|
)
|
|
195
198
|
self._active = config
|
|
@@ -403,8 +406,8 @@ class NetworkManager:
|
|
|
403
406
|
}
|
|
404
407
|
|
|
405
408
|
@property
|
|
406
|
-
def rpc(self) ->
|
|
407
|
-
"""Get underlying
|
|
409
|
+
def rpc(self) -> ReadClient | None:
|
|
410
|
+
"""Get underlying read client."""
|
|
408
411
|
return self._rpc
|
|
409
412
|
|
|
410
413
|
|
|
@@ -0,0 +1,74 @@
|
|
|
1
|
+
"""Runtime controls with TTL caching."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import threading
|
|
6
|
+
import time
|
|
7
|
+
from dataclasses import dataclass
|
|
8
|
+
from datetime import datetime
|
|
9
|
+
from typing import TYPE_CHECKING
|
|
10
|
+
|
|
11
|
+
from brawny.model.types import RuntimeControl
|
|
12
|
+
from brawny.metrics import RUNTIME_CONTROL_ACTIVE, RUNTIME_CONTROL_TTL_SECONDS, get_metrics
|
|
13
|
+
|
|
14
|
+
if TYPE_CHECKING:
|
|
15
|
+
from brawny.db.base import Database
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
@dataclass
class _CachedControl:
    """Single cache entry: a control snapshot plus its monotonic-clock expiry."""

    # Snapshot of the DB row at fetch time (None means "no such control row").
    control: RuntimeControl | None
    # time.monotonic() deadline after which the entry is considered stale.
    expires_at: float
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class RuntimeControls:
    """Cached runtime controls accessor.

    Wraps ``Database.get_runtime_control`` with a short-TTL in-process cache
    so hot paths can consult containment controls without a DB round trip on
    every call.
    """

    def __init__(self, db: "Database", ttl_seconds: float = 2.0) -> None:
        """Create the accessor.

        Args:
            db: Database providing ``get_runtime_control``.
            ttl_seconds: How long a fetched control is served from cache.
        """
        self._db = db
        self._ttl_seconds = ttl_seconds
        self._lock = threading.RLock()
        self._cache: dict[str, _CachedControl] = {}

    @staticmethod
    def _now_for(expires_at: datetime) -> datetime:
        """Return "now" with the same awareness (naive vs tz-aware) as *expires_at*.

        Mixing naive and aware datetimes in comparison or subtraction raises
        TypeError, so we match whatever awareness the stored timestamp uses.
        """
        if expires_at.tzinfo is not None:
            return datetime.now(expires_at.tzinfo)
        return datetime.utcnow()

    def get(self, control: str) -> RuntimeControl | None:
        """Return the control row (or None), served from cache within the TTL."""
        now = time.monotonic()
        with self._lock:
            cached = self._cache.get(control)
            if cached and cached.expires_at > now:
                return cached.control

        # Cache miss or stale: fetch outside the lock so a slow DB read does
        # not block readers of other, still-fresh controls.
        value = self._db.get_runtime_control(control)
        self._emit_metrics(control, value)
        with self._lock:
            self._cache[control] = _CachedControl(
                control=value,
                expires_at=now + self._ttl_seconds,
            )
        return value

    def is_active(self, control: str) -> bool:
        """True if the control exists, is flagged active, and has not expired."""
        rc = self.get(control)
        if rc is None or not rc.active:
            return False
        if rc.expires_at is None:
            return True  # active with no TTL
        return rc.expires_at > self._now_for(rc.expires_at)

    def refresh(self, control: str) -> None:
        """Drop any cached entry so the next get() re-reads the database."""
        with self._lock:
            self._cache.pop(control, None)

    def _emit_metrics(self, control: str, rc: RuntimeControl | None) -> None:
        """Publish active/TTL gauges for a freshly fetched control."""
        metrics = get_metrics()
        if rc is None:
            metrics.gauge(RUNTIME_CONTROL_ACTIVE).set(0, control=control)
            return
        metrics.gauge(RUNTIME_CONTROL_ACTIVE).set(1 if rc.active else 0, control=control)
        if rc.expires_at:
            # Awareness-matched "now": subtracting a naive utcnow() from a
            # tz-aware expires_at would raise TypeError (is_active already
            # guards against mixed awareness; mirror that here).
            ttl = (rc.expires_at - self._now_for(rc.expires_at)).total_seconds()
            metrics.gauge(RUNTIME_CONTROL_TTL_SECONDS).set(max(ttl, 0), control=control)
brawny/scheduler/poller.py
CHANGED
|
@@ -36,7 +36,7 @@ if TYPE_CHECKING:
|
|
|
36
36
|
from brawny.config import Config
|
|
37
37
|
from brawny.db.base import Database
|
|
38
38
|
from brawny.model.types import BlockInfo
|
|
39
|
-
from brawny._rpc.
|
|
39
|
+
from brawny._rpc.clients import ReadClient
|
|
40
40
|
from brawny.scheduler.reorg import ReorgDetector
|
|
41
41
|
|
|
42
42
|
logger = get_logger(__name__)
|
|
@@ -69,7 +69,7 @@ class BlockPoller:
|
|
|
69
69
|
def __init__(
|
|
70
70
|
self,
|
|
71
71
|
db: Database,
|
|
72
|
-
rpc:
|
|
72
|
+
rpc: ReadClient,
|
|
73
73
|
config: Config,
|
|
74
74
|
block_handler: Callable[[BlockInfo], None],
|
|
75
75
|
reorg_detector: "ReorgDetector | None" = None,
|
|
@@ -250,6 +250,12 @@ class BlockPoller:
|
|
|
250
250
|
try:
|
|
251
251
|
if self._reorg_detector:
|
|
252
252
|
reorg_result = self._reorg_detector.check(block_number)
|
|
253
|
+
if reorg_result is None:
|
|
254
|
+
return PollResult(
|
|
255
|
+
blocks_processed=0,
|
|
256
|
+
head_block=head_block,
|
|
257
|
+
last_processed=last_processed,
|
|
258
|
+
)
|
|
253
259
|
if reorg_result.reorg_detected:
|
|
254
260
|
if reorg_result.pause:
|
|
255
261
|
logger.error(
|
|
@@ -306,15 +312,11 @@ class BlockPoller:
|
|
|
306
312
|
block_info = self._fetch_block_info(block_number)
|
|
307
313
|
if block_info is not None:
|
|
308
314
|
break
|
|
309
|
-
# Exponential backoff: 0.5s, 1s, 2s
|
|
310
|
-
backoff = 0.5 * (2**retry)
|
|
311
315
|
logger.debug(
|
|
312
316
|
"poller.block_fetch_retry",
|
|
313
317
|
block_number=block_number,
|
|
314
318
|
retry=retry + 1,
|
|
315
|
-
backoff_seconds=backoff,
|
|
316
319
|
)
|
|
317
|
-
time.sleep(backoff)
|
|
318
320
|
|
|
319
321
|
if block_info is None:
|
|
320
322
|
logger.warning(
|
|
@@ -446,6 +448,7 @@ class BlockPoller:
|
|
|
446
448
|
BlockInfo or None if not found
|
|
447
449
|
"""
|
|
448
450
|
from brawny.model.types import BlockInfo
|
|
451
|
+
from brawny._rpc.errors import RPCError
|
|
449
452
|
|
|
450
453
|
try:
|
|
451
454
|
block = self._rpc.get_block(block_number)
|
|
@@ -457,8 +460,9 @@ class BlockPoller:
|
|
|
457
460
|
block_number=block["number"],
|
|
458
461
|
block_hash=f"0x{block['hash'].hex()}" if isinstance(block["hash"], bytes) else block["hash"],
|
|
459
462
|
timestamp=block["timestamp"],
|
|
463
|
+
base_fee=block.get("baseFeePerGas", 0),
|
|
460
464
|
)
|
|
461
|
-
except
|
|
465
|
+
except (RPCError, KeyError, TypeError, ValueError):
|
|
462
466
|
return None
|
|
463
467
|
|
|
464
468
|
def poll_once(self) -> PollResult:
|