brawny 0.1.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- brawny/__init__.py +106 -0
- brawny/_context.py +232 -0
- brawny/_rpc/__init__.py +38 -0
- brawny/_rpc/broadcast.py +172 -0
- brawny/_rpc/clients.py +98 -0
- brawny/_rpc/context.py +49 -0
- brawny/_rpc/errors.py +252 -0
- brawny/_rpc/gas.py +158 -0
- brawny/_rpc/manager.py +982 -0
- brawny/_rpc/selector.py +156 -0
- brawny/accounts.py +534 -0
- brawny/alerts/__init__.py +132 -0
- brawny/alerts/abi_resolver.py +530 -0
- brawny/alerts/base.py +152 -0
- brawny/alerts/context.py +271 -0
- brawny/alerts/contracts.py +635 -0
- brawny/alerts/encoded_call.py +201 -0
- brawny/alerts/errors.py +267 -0
- brawny/alerts/events.py +680 -0
- brawny/alerts/function_caller.py +364 -0
- brawny/alerts/health.py +185 -0
- brawny/alerts/routing.py +118 -0
- brawny/alerts/send.py +364 -0
- brawny/api.py +660 -0
- brawny/chain.py +93 -0
- brawny/cli/__init__.py +16 -0
- brawny/cli/app.py +17 -0
- brawny/cli/bootstrap.py +37 -0
- brawny/cli/commands/__init__.py +41 -0
- brawny/cli/commands/abi.py +93 -0
- brawny/cli/commands/accounts.py +632 -0
- brawny/cli/commands/console.py +495 -0
- brawny/cli/commands/contract.py +139 -0
- brawny/cli/commands/health.py +112 -0
- brawny/cli/commands/init_project.py +86 -0
- brawny/cli/commands/intents.py +130 -0
- brawny/cli/commands/job_dev.py +254 -0
- brawny/cli/commands/jobs.py +308 -0
- brawny/cli/commands/logs.py +87 -0
- brawny/cli/commands/maintenance.py +182 -0
- brawny/cli/commands/migrate.py +51 -0
- brawny/cli/commands/networks.py +253 -0
- brawny/cli/commands/run.py +249 -0
- brawny/cli/commands/script.py +209 -0
- brawny/cli/commands/signer.py +248 -0
- brawny/cli/helpers.py +265 -0
- brawny/cli_templates.py +1445 -0
- brawny/config/__init__.py +74 -0
- brawny/config/models.py +404 -0
- brawny/config/parser.py +633 -0
- brawny/config/routing.py +55 -0
- brawny/config/validation.py +246 -0
- brawny/daemon/__init__.py +14 -0
- brawny/daemon/context.py +69 -0
- brawny/daemon/core.py +702 -0
- brawny/daemon/loops.py +327 -0
- brawny/db/__init__.py +78 -0
- brawny/db/base.py +986 -0
- brawny/db/base_new.py +165 -0
- brawny/db/circuit_breaker.py +97 -0
- brawny/db/global_cache.py +298 -0
- brawny/db/mappers.py +182 -0
- brawny/db/migrate.py +349 -0
- brawny/db/migrations/001_init.sql +186 -0
- brawny/db/migrations/002_add_included_block.sql +7 -0
- brawny/db/migrations/003_add_broadcast_at.sql +10 -0
- brawny/db/migrations/004_broadcast_binding.sql +20 -0
- brawny/db/migrations/005_add_retry_after.sql +9 -0
- brawny/db/migrations/006_add_retry_count_column.sql +11 -0
- brawny/db/migrations/007_add_gap_tracking.sql +18 -0
- brawny/db/migrations/008_add_transactions.sql +72 -0
- brawny/db/migrations/009_add_intent_metadata.sql +5 -0
- brawny/db/migrations/010_add_nonce_gap_index.sql +9 -0
- brawny/db/migrations/011_add_job_logs.sql +24 -0
- brawny/db/migrations/012_add_claimed_by.sql +5 -0
- brawny/db/ops/__init__.py +29 -0
- brawny/db/ops/attempts.py +108 -0
- brawny/db/ops/blocks.py +83 -0
- brawny/db/ops/cache.py +93 -0
- brawny/db/ops/intents.py +296 -0
- brawny/db/ops/jobs.py +110 -0
- brawny/db/ops/logs.py +97 -0
- brawny/db/ops/nonces.py +322 -0
- brawny/db/postgres.py +2535 -0
- brawny/db/postgres_new.py +196 -0
- brawny/db/queries.py +584 -0
- brawny/db/sqlite.py +2733 -0
- brawny/db/sqlite_new.py +191 -0
- brawny/history.py +126 -0
- brawny/interfaces.py +136 -0
- brawny/invariants.py +155 -0
- brawny/jobs/__init__.py +26 -0
- brawny/jobs/base.py +287 -0
- brawny/jobs/discovery.py +233 -0
- brawny/jobs/job_validation.py +111 -0
- brawny/jobs/kv.py +125 -0
- brawny/jobs/registry.py +283 -0
- brawny/keystore.py +484 -0
- brawny/lifecycle.py +551 -0
- brawny/logging.py +290 -0
- brawny/metrics.py +594 -0
- brawny/model/__init__.py +53 -0
- brawny/model/contexts.py +319 -0
- brawny/model/enums.py +70 -0
- brawny/model/errors.py +194 -0
- brawny/model/events.py +93 -0
- brawny/model/startup.py +20 -0
- brawny/model/types.py +483 -0
- brawny/networks/__init__.py +96 -0
- brawny/networks/config.py +269 -0
- brawny/networks/manager.py +423 -0
- brawny/obs/__init__.py +67 -0
- brawny/obs/emit.py +158 -0
- brawny/obs/health.py +175 -0
- brawny/obs/heartbeat.py +133 -0
- brawny/reconciliation.py +108 -0
- brawny/scheduler/__init__.py +19 -0
- brawny/scheduler/poller.py +472 -0
- brawny/scheduler/reorg.py +632 -0
- brawny/scheduler/runner.py +708 -0
- brawny/scheduler/shutdown.py +371 -0
- brawny/script_tx.py +297 -0
- brawny/scripting.py +251 -0
- brawny/startup.py +76 -0
- brawny/telegram.py +393 -0
- brawny/testing.py +108 -0
- brawny/tx/__init__.py +41 -0
- brawny/tx/executor.py +1071 -0
- brawny/tx/fees.py +50 -0
- brawny/tx/intent.py +423 -0
- brawny/tx/monitor.py +628 -0
- brawny/tx/nonce.py +498 -0
- brawny/tx/replacement.py +456 -0
- brawny/tx/utils.py +26 -0
- brawny/utils.py +205 -0
- brawny/validation.py +69 -0
- brawny-0.1.13.dist-info/METADATA +156 -0
- brawny-0.1.13.dist-info/RECORD +141 -0
- brawny-0.1.13.dist-info/WHEEL +5 -0
- brawny-0.1.13.dist-info/entry_points.txt +2 -0
- brawny-0.1.13.dist-info/top_level.txt +1 -0
brawny/__init__.py
ADDED
@@ -0,0 +1,106 @@
"""
brawny: Block-driven Ethereum job/transaction execution framework.

This package provides a robust, production-ready framework for scheduling
and executing Ethereum transactions based on block events.
"""

from brawny.jobs.base import Job
from brawny.jobs.registry import registry, job
from brawny.model.types import (
    BlockInfo,
    Trigger,
    TxAttempt,
    TxIntent,
    TxIntentSpec,
    to_wei,
)
from brawny.model.contexts import (
    BlockContext,
    CheckContext,
    BuildContext,
    AlertContext,
)
from brawny.model.events import DecodedEvent, find_event, events_by_name
from brawny.telegram import telegram, get_telegram, TelegramBot
from brawny.testing import job_context
from brawny.interfaces import interface

# Implicit context helpers (Flask-like pattern)
from brawny.api import (
    block,
    ctx,  # Get current phase context (CheckContext or BuildContext)
    trigger,
    intent,
    shorten,
    explorer_link,
    gas_ok,
    gas_quote,
    kv,  # Persistent KV store
    alert,  # Send alerts from job hooks
    rpc,  # RPC proxy (internal package renamed to _rpc to avoid collision)
    get_address_from_alias,
    Contract,  # Brownie-style
    Wei,  # Brownie-style
    web3,  # Brownie-style
)

# Brownie-style singletons for scripts
from brawny.accounts import accounts, Account
from brawny.history import history
from brawny.chain import chain
from brawny.networks import network

__version__ = "0.1.0"

__all__ = [
    "__version__",
    "Job",
    "job",
    "registry",
    "BlockInfo",
    "Trigger",
    "TxAttempt",
    "TxIntent",
    "TxIntentSpec",
    "to_wei",
    # Phase-specific contexts (OE7)
    "BlockContext",
    "CheckContext",
    "BuildContext",
    "AlertContext",
    # Events
    "DecodedEvent",
    "find_event",
    "events_by_name",
    # Implicit context helpers
    "block",
    "ctx",
    "trigger",
    "intent",
    "shorten",
    "explorer_link",
    "gas_ok",
    "gas_quote",
    "kv",
    "alert",
    "rpc",
    "get_address_from_alias",
    # Brownie-style helpers
    "Contract",
    "Wei",
    "web3",
    # Brownie-style singletons
    "accounts",
    "Account",
    "history",
    "chain",
    "network",
    # Telegram
    "telegram",
    "get_telegram",
    "TelegramBot",
    # Testing
    "job_context",
    "interface",
]
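For orientation, a minimal job sketch using the names re-exported above (illustrative only, not part of the wheel). The check() body mirrors the usage example in brawny/_context.py's docstring; the bare @job decorator usage and the build_tx() return shape are assumptions, since neither is spelled out in this diff.

from brawny import Contract, Job, job, trigger


@job  # assumed usage; the decorator's options live in brawny/jobs/registry.py
class VaultKeeper(Job):
    def check(self, ctx):
        # Contract() works here because the framework sets the implicit
        # context before calling check() (see brawny/_context.py).
        vault = Contract("vault")
        return trigger(reason="rebalance needed", tx_required=True)

    def build_tx(self, ctx):
        # Invoked when check() requested a transaction; the exact intent
        # shape (TxIntentSpec) is defined in brawny/model/types.py.
        ...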
brawny/_context.py
ADDED
@@ -0,0 +1,232 @@
"""Implicit context for job hooks and console.

Provides thread-safe context storage using Python's contextvars module.
The framework sets these before calling check() / build_tx(), allowing
helper functions like Contract(), trigger(), intent() to work without explicit
ctx parameter.

Usage (in job methods):
    from brawny import Contract, trigger, intent, block

    def check(self, ctx):
        vault = Contract("vault")  # Works because context is set
        return trigger(reason="...", tx_required=True)

Usage (in alert hooks):
    from brawny import Contract, shorten, explorer_link

    def alert_confirmed(self, ctx):
        vault = Contract("vault")
        return f"Done!\\n{explorer_link(ctx.receipt.transactionHash.hex())}"

Usage (in console):
    >>> claimer = interface.IClaimer("0x...")  # Works via console context
"""

from __future__ import annotations

import contextvars
from contextvars import Token
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Union

if TYPE_CHECKING:
    from brawny.model.contexts import CheckContext, BuildContext, AlertContext
    from brawny.jobs.base import Job
    from brawny._rpc import RPCManager
    from brawny.alerts.contracts import ContractSystem

# Type alias for any phase context
PhaseContext = Union["CheckContext", "BuildContext"]


@dataclass(frozen=True)
class ActiveContext:
    """Minimal context for Contract()/interface/web3 access.

    Used by console (and potentially scripts/tests) to provide the
    necessary dependencies without requiring full job/alert context.
    """

    rpc: RPCManager
    contract_system: ContractSystem
    chain_id: int
    network_name: str | None = None
    rpc_group: str | None = None


# Context variables - set by framework before calling job hooks
_job_ctx: contextvars.ContextVar[PhaseContext | None] = contextvars.ContextVar(
    "job_ctx", default=None
)
_current_job: contextvars.ContextVar[Job | None] = contextvars.ContextVar(
    "current_job", default=None
)
# Alert context - uses Any to support both old AlertContext and new hook contexts
_alert_ctx: contextvars.ContextVar[Any | None] = contextvars.ContextVar(
    "alert_ctx", default=None
)
_console_ctx: contextvars.ContextVar[ActiveContext | None] = contextvars.ContextVar(
    "console_ctx", default=None
)

# Block pinning for check() scope - ensures consistent snapshot reads
_check_block: contextvars.ContextVar[int | None] = contextvars.ContextVar(
    "check_block", default=None
)


def get_job_context() -> PhaseContext:
    """Get the current phase context (CheckContext or BuildContext).

    Returns:
        The active context

    Raises:
        LookupError: If called outside a job hook (check/build_tx)
    """
    ctx = _job_ctx.get()
    if ctx is None:
        raise LookupError(
            "No active context. Must be called from within check() or build_tx()."
        )
    return ctx


def get_current_job() -> Job:
    """Get the current Job instance.

    Returns:
        The active Job

    Raises:
        LookupError: If called outside a job hook
    """
    job = _current_job.get()
    if job is None:
        raise LookupError("No active job.")
    return job


def get_alert_context() -> Any | None:
    """Get the current alert context if available.

    Returns:
        The active context (TriggerContext, SuccessContext, FailureContext,
        or AlertContext for legacy hooks), or None if not in a hook
    """
    return _alert_ctx.get()


def set_alert_context(ctx: Any) -> Token:
    """Set the current alert context, return token for reset.

    Called by the framework before invoking hooks. Use token-based reset
    to ensure safe nesting if hooks call helpers.

    Args:
        ctx: Context to set (TriggerContext, SuccessContext, FailureContext)

    Returns:
        Token that can be used to reset the context
    """
    return _alert_ctx.set(ctx)


def reset_alert_context(token: Token) -> None:
    """Reset alert context to previous value using token.

    Args:
        token: Token from set_alert_context()
    """
    _alert_ctx.reset(token)


def get_console_context() -> ActiveContext | None:
    """Get the current console ActiveContext if available.

    Returns:
        The active console context, or None if not in console
    """
    return _console_ctx.get()


def set_console_context(ctx: ActiveContext | None) -> contextvars.Token:
    """Set the console ActiveContext.

    Called by console at startup to enable Contract()/interface/web3 access.

    Args:
        ctx: ActiveContext to set, or None to clear

    Returns:
        Token that can be used to reset the context (useful for tests)
    """
    return _console_ctx.set(ctx)


# =============================================================================
# Block Pinning for check() Scope
# =============================================================================


def set_check_block(block_number: int) -> contextvars.Token:
    """Set the block number for check() scope.

    Called by the runner before invoking check() to pin all Contract reads
    to a consistent block snapshot.

    Args:
        block_number: Block number to pin reads to

    Returns:
        Token for reset (must call reset_check_block when check completes)
    """
    return _check_block.set(block_number)


def reset_check_block(token: contextvars.Token) -> None:
    """Reset check block after check() completes.

    Args:
        token: Token from set_check_block()
    """
    _check_block.reset(token)


def get_check_block() -> int | None:
    """Get current check block, or None if not in check scope.

    Returns:
        Block number if in check() scope, None otherwise
    """
    return _check_block.get()


def resolve_block_identifier(
    explicit: int | str | None,
    handle_block: int | None = None,
) -> int | str:
    """Resolve block identifier with 4-level precedence.

    Precedence (highest to lowest):
    1. Explicit param (caller override) - always wins
    2. Handle's baked block (from ctx.contracts.at_block())
    3. Check scope pin (_check_block contextvar)
    4. Default "latest"

    Args:
        explicit: Explicitly passed block_identifier (None if not passed)
        handle_block: Block baked into ContractHandle (None if not set)

    Returns:
        Block identifier to use for eth_call (int or "latest")
    """
    if explicit is not None:
        return explicit
    if handle_block is not None:
        return handle_block
    check_block = get_check_block()
    if check_block is not None:
        return check_block
    return "latest"
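The token-based set/reset pairs above are easiest to read end to end. The sketch below (illustrative, not part of the module) wraps set_check_block()/reset_check_block() in a context manager and walks resolve_block_identifier() through each precedence level; the block numbers are arbitrary.

from contextlib import contextmanager

from brawny._context import (
    reset_check_block,
    resolve_block_identifier,
    set_check_block,
)


@contextmanager
def pinned_check_block(block_number: int):
    """Pin Contract reads to one block for the enclosed scope, then restore."""
    token = set_check_block(block_number)
    try:
        yield
    finally:
        reset_check_block(token)


# Outside any check() scope: level 4, the default.
assert resolve_block_identifier(None) == "latest"

with pinned_check_block(19_000_000):
    # Level 3: the check-scope pin wins when nothing more specific is given.
    assert resolve_block_identifier(None) == 19_000_000
    # Level 2: a handle's baked block overrides the pin.
    assert resolve_block_identifier(None, handle_block=18_999_990) == 18_999_990
    # Level 1: an explicit block_identifier always wins.
    assert resolve_block_identifier(17_500_000, handle_block=18_999_990) == 17_500_000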
brawny/_rpc/__init__.py
ADDED
@@ -0,0 +1,38 @@
"""RPC management with multi-endpoint failover and health tracking.

OE6 Simplification:
- Uses EndpointSelector for health-aware endpoint ordering
- Explicit failover gate (only on RPCRetryableError)
- Removed circuit breaker and rate limiter (simpler error handling)
"""

from brawny._rpc.errors import (
    RPCError,
    RPCFatalError,
    RPCRecoverableError,
    RPCRetryableError,
    classify_error,
    normalize_error_code,
)
from brawny._rpc.manager import RPCManager
from brawny._rpc.selector import EndpointSelector, EndpointHealth
from brawny._rpc.context import (
    get_job_context,
    reset_job_context,
    set_job_context,
)

__all__ = [
    "RPCManager",
    "EndpointSelector",
    "EndpointHealth",
    "RPCError",
    "RPCFatalError",
    "RPCRecoverableError",
    "RPCRetryableError",
    "classify_error",
    "normalize_error_code",
    "get_job_context",
    "reset_job_context",
    "set_job_context",
]
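As a reading aid for the error taxonomy exported here, a sketch of caller-side branching that matches the semantics described in this package's docstrings: only RPCRetryableError gates endpoint failover, recoverable errors may succeed with different parameters, fatal errors are permanent. describe_outcome and do_call are illustrative names, not part of the wheel.

from brawny._rpc import RPCFatalError, RPCRecoverableError, RPCRetryableError


def describe_outcome(do_call):
    """Run a zero-argument RPC callable and label the result per error class."""
    try:
        return ("ok", do_call())
    except RPCRetryableError as exc:
        # Transient endpoint trouble: the only class that should trigger
        # failover to another endpoint in the group.
        return ("retry_on_another_endpoint", exc)
    except RPCRecoverableError as exc:
        # The request may succeed if rebuilt with different parameters.
        return ("rebuild_and_retry", exc)
    except RPCFatalError as exc:
        # Permanently rejected (e.g. nonce, funds, revert for broadcasts).
        return ("give_up", exc)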
brawny/_rpc/broadcast.py
ADDED
@@ -0,0 +1,172 @@
"""Broadcast helpers with isolation guarantees.

This is the ONLY place that wraps RPCPoolExhaustedError → RPCGroupUnavailableError.
RPCManager does the endpoint iteration; this module adds group context.
"""

from __future__ import annotations

import time
from typing import TYPE_CHECKING

from brawny.metrics import (
    BROADCAST_ATTEMPTS,
    BROADCAST_LATENCY_SECONDS,
    get_metrics,
)
from brawny._rpc.errors import (
    RPCFatalError,
    RPCPoolExhaustedError,
    RPCGroupUnavailableError,
    RPCRecoverableError,
)

if TYPE_CHECKING:
    from brawny.config import Config
    from brawny._rpc.manager import RPCManager


def create_broadcast_manager(endpoints: list[str], config: "Config") -> "RPCManager":
    """Create an RPCManager for broadcasting to specific endpoints.

    This creates a dedicated RPCManager instance for broadcasting.
    Each call uses the provided endpoints (from binding snapshot for retries,
    or from current config for first broadcast).

    Args:
        endpoints: List of endpoint URLs to use (must be canonical)
        config: Config for RPC settings

    Returns:
        RPCManager configured for the provided endpoints
    """
    from brawny._rpc.manager import RPCManager

    return RPCManager(
        endpoints=endpoints,
        timeout_seconds=config.rpc_timeout_seconds,
        max_retries=config.rpc_max_retries,
        retry_backoff_base=config.rpc_retry_backoff_base,
        circuit_breaker_seconds=config.rpc_circuit_breaker_seconds,
        rate_limit_per_second=config.rpc_rate_limit_per_second,
        rate_limit_burst=config.rpc_rate_limit_burst,
        chain_id=config.chain_id,
        log_init=False,  # Don't log ephemeral broadcast managers
    )


def broadcast_transaction(
    raw_tx: bytes,
    endpoints: list[str],
    group_name: str | None,
    config: "Config",
    job_id: str | None = None,
) -> tuple[str, str]:
    """Broadcast transaction with isolation guarantee.

    This function creates a dedicated RPCManager for the broadcast,
    ensuring the transaction is only sent to the specified endpoints.

    Args:
        raw_tx: Signed transaction bytes
        endpoints: Endpoint list (MUST be binding snapshot for retries)
        group_name: Group name for logging/errors (None for ungrouped)
        config: Config for RPC settings
        job_id: Job ID for metrics (optional)

    Returns:
        Tuple of (tx_hash, endpoint_url)

    Raises:
        RPCGroupUnavailableError: All endpoints in group failed
        RPCFatalError: TX rejected (nonce, funds, revert)
        RPCRecoverableError: TX may succeed with different params
    """
    manager = create_broadcast_manager(endpoints, config)
    metrics = get_metrics()
    chain_id = config.chain_id
    start_time = time.perf_counter()
    group_label = group_name or "ungrouped"

    try:
        tx_hash, endpoint_url = manager.send_raw_transaction(raw_tx)

        # Record success metrics
        latency = time.perf_counter() - start_time
        metrics.counter(BROADCAST_ATTEMPTS).inc(
            chain_id=chain_id,
            job_id=job_id or "unknown",
            broadcast_group=group_label,
            result="success",
        )
        metrics.histogram(BROADCAST_LATENCY_SECONDS).observe(
            latency,
            chain_id=chain_id,
            job_id=job_id or "unknown",
            broadcast_group=group_label,
        )

        return tx_hash, endpoint_url

    except RPCPoolExhaustedError as e:
        # Record unavailable metrics
        metrics.counter(BROADCAST_ATTEMPTS).inc(
            chain_id=chain_id,
            job_id=job_id or "unknown",
            broadcast_group=group_label,
            result="unavailable",
        )
        # Wrap with group context for user-facing error
        raise RPCGroupUnavailableError(
            f"All endpoints in group '{group_label}' failed",
            group_name=group_name,
            endpoints=e.endpoints,
            last_error=e.last_error,
        ) from e

    except RPCFatalError:
        # Record fatal error metrics
        metrics.counter(BROADCAST_ATTEMPTS).inc(
            chain_id=chain_id,
            job_id=job_id or "unknown",
            broadcast_group=group_label,
            result="fatal",
        )
        raise

    except RPCRecoverableError:
        # Record recoverable error metrics
        metrics.counter(BROADCAST_ATTEMPTS).inc(
            chain_id=chain_id,
            job_id=job_id or "unknown",
            broadcast_group=group_label,
            result="recoverable",
        )
        raise


def get_broadcast_endpoints(config: "Config", group_name: str) -> list[str]:
    """Get endpoints for a broadcast group (already canonical + deduped).

    This returns the endpoint list from the current config. For first broadcasts,
    this list should be persisted as the binding. For retries, use the
    persisted binding instead of calling this function.

    Args:
        config: Application configuration
        group_name: Name of the broadcast group

    Returns:
        List of canonical endpoint URLs

    Raises:
        ValueError: If group not found or has no endpoints
    """
    if group_name not in config.rpc_groups:
        raise ValueError(f"Broadcast group '{group_name}' not found")

    group = config.rpc_groups[group_name]
    if not group.endpoints:
        raise ValueError(f"Broadcast group '{group_name}' has no endpoints")

    return group.endpoints
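A sketch of the first-broadcast path the docstrings above describe: resolve the group's endpoints, persist that list as the binding (the persistence step is only a comment here, since the storage layer is not part of this module), then broadcast against exactly that snapshot. first_broadcast is an illustrative wrapper, not part of the wheel.

from brawny._rpc.broadcast import broadcast_transaction, get_broadcast_endpoints
from brawny._rpc.errors import (
    RPCFatalError,
    RPCGroupUnavailableError,
    RPCRecoverableError,
)


def first_broadcast(raw_tx: bytes, config, group_name: str, job_id: str) -> tuple[str, str]:
    """Resolve endpoints, bind them, and broadcast through the bound snapshot."""
    endpoints = get_broadcast_endpoints(config, group_name)
    # Persist `endpoints` as the binding here so retries reuse the same
    # snapshot instead of re-reading config (storage not shown in this diff).
    try:
        return broadcast_transaction(
            raw_tx,
            endpoints=endpoints,
            group_name=group_name,
            config=config,
            job_id=job_id,
        )
    except RPCGroupUnavailableError:
        # Every endpoint in the bound group failed; retry later against
        # the same snapshot.
        raise
    except RPCFatalError:
        # Rejected outright (nonce, funds, revert); do not rebroadcast as-is.
        raise
    except RPCRecoverableError:
        # May succeed if rebuilt with different parameters (e.g. higher fees).
        raise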
brawny/_rpc/clients.py
ADDED
@@ -0,0 +1,98 @@
"""RPC client management — shared by TxExecutor and JobRunner.

This module provides caching for read RPC clients by group.
Broadcast clients are created per-call from endpoint snapshots (see broadcast.py).
"""

from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from brawny.config import Config
    from brawny._rpc.manager import RPCManager


class RPCClients:
    """Manages RPC clients for read operations.

    Caches read clients by group. Broadcast clients are created per-call
    from endpoint snapshots (see broadcast.py).

    Example:
        clients = RPCClients(config)

        # Get cached read client for a group
        public_rpc = clients.get_read_client("public")
        private_rpc = clients.get_read_client("private")

        # Same group = same cached client
        assert clients.get_read_client("public") is public_rpc
    """

    def __init__(self, config: "Config") -> None:
        """Initialize RPC clients manager.

        Args:
            config: Application configuration
        """
        self._config = config
        self._read_clients: dict[str, "RPCManager"] = {}

    def get_read_client(self, group_name: str) -> "RPCManager":
        """Get (cached) read client for a group.

        If the group's client hasn't been created yet, creates it.
        Subsequent calls return the same cached instance.

        Args:
            group_name: Name of the RPC group (e.g., "public", "private")

        Returns:
            RPCManager configured for the group's endpoints

        Raises:
            ValueError: If group not found in config.rpc_groups
        """
        if group_name not in self._read_clients:
            from brawny._rpc.manager import RPCManager

            if group_name not in self._config.rpc_groups:
                raise ValueError(f"RPC group '{group_name}' not found")

            group = self._config.rpc_groups[group_name]
            self._read_clients[group_name] = RPCManager(
                endpoints=group.endpoints,
                timeout_seconds=self._config.rpc_timeout_seconds,
                max_retries=self._config.rpc_max_retries,
                retry_backoff_base=self._config.rpc_retry_backoff_base,
                circuit_breaker_seconds=self._config.rpc_circuit_breaker_seconds,
                rate_limit_per_second=self._config.rpc_rate_limit_per_second,
                rate_limit_burst=self._config.rpc_rate_limit_burst,
                chain_id=self._config.chain_id,
                log_init=False,  # Daemon already logged main RPC init
            )

        return self._read_clients[group_name]

    def get_default_client(self) -> "RPCManager":
        """Get the default read client.

        Uses config.rpc_default_group if set, otherwise requires a single rpc_group.

        Returns:
            RPCManager for the default group

        Raises:
            ValueError: If default group cannot be resolved
        """
        from brawny.config.routing import resolve_default_group

        return self.get_read_client(resolve_default_group(self._config))

    def clear_cache(self) -> None:
        """Clear all cached clients.

        Useful for testing or when config changes require new clients.
        """
        self._read_clients.clear()
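Extending the class's own docstring example, a short sketch of the default-group path and cache invalidation. The origin of `config` and the existence of a "public" group are assumptions for illustration; wire_read_clients is not part of the wheel.

from brawny._rpc.clients import RPCClients


def wire_read_clients(config) -> RPCClients:
    """Illustrative wiring; `config` must expose rpc_groups and the rpc_* settings."""
    clients = RPCClients(config)

    # The default client resolves via brawny.config.routing.resolve_default_group
    # (config.rpc_default_group, or the single configured group).
    default_rpc = clients.get_default_client()

    # Named groups are cached: repeated lookups return the same RPCManager.
    assert clients.get_read_client("public") is clients.get_read_client("public")

    # After a config reload, drop cached managers so new settings take effect.
    clients.clear_cache()
    return clients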
brawny/_rpc/context.py
ADDED
@@ -0,0 +1,49 @@
"""RPC job context for attribution metrics.

Provides a contextvar to track which job is making RPC calls,
allowing per-job RPC pressure metrics without high-cardinality labels
on the main RPC metrics.

Usage:
    from brawny._rpc.context import set_job_context, reset_job_context

    token = set_job_context(job.job_id)
    try:
        # ... RPC calls here get attributed to job_id ...
    finally:
        reset_job_context(token)
"""

from contextvars import ContextVar, Token

_rpc_job_ctx: ContextVar[str | None] = ContextVar("rpc_job_ctx", default=None)


def set_job_context(job_id: str | None) -> Token:
    """Set the current job context for RPC attribution.

    Args:
        job_id: Job ID to attribute RPC calls to, or None to clear

    Returns:
        Token for resetting context via reset_job_context()
    """
    return _rpc_job_ctx.set(job_id)


def reset_job_context(token: Token) -> None:
    """Reset job context to previous value.

    Args:
        token: Token returned by set_job_context()
    """
    _rpc_job_ctx.reset(token)


def get_job_context() -> str | None:
    """Get the current job context.

    Returns:
        Job ID if set, None otherwise
    """
    return _rpc_job_ctx.get()
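The try/finally idiom from the module docstring can be packaged as a context manager. A small illustrative wrapper (not part of the module); the "vault-keeper" job ID is a made-up example.

from contextlib import contextmanager

from brawny._rpc.context import get_job_context, reset_job_context, set_job_context


@contextmanager
def attributed_to(job_id: str):
    """Attribute all RPC calls inside the block to `job_id`, then restore."""
    token = set_job_context(job_id)
    try:
        yield
    finally:
        reset_job_context(token)


with attributed_to("vault-keeper"):
    assert get_job_context() == "vault-keeper"
# Outside the block the previous value (None by default) is restored.
assert get_job_context() is None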