brawny 0.1.13__py3-none-any.whl → 0.1.22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- brawny/__init__.py +2 -0
- brawny/_context.py +5 -5
- brawny/_rpc/__init__.py +36 -12
- brawny/_rpc/broadcast.py +14 -13
- brawny/_rpc/caller.py +243 -0
- brawny/_rpc/client.py +539 -0
- brawny/_rpc/clients.py +11 -11
- brawny/_rpc/context.py +23 -0
- brawny/_rpc/errors.py +465 -31
- brawny/_rpc/gas.py +7 -6
- brawny/_rpc/pool.py +18 -0
- brawny/_rpc/retry.py +266 -0
- brawny/_rpc/retry_policy.py +81 -0
- brawny/accounts.py +28 -9
- brawny/alerts/__init__.py +15 -18
- brawny/alerts/abi_resolver.py +212 -36
- brawny/alerts/base.py +2 -2
- brawny/alerts/contracts.py +77 -10
- brawny/alerts/errors.py +30 -3
- brawny/alerts/events.py +38 -5
- brawny/alerts/health.py +19 -13
- brawny/alerts/send.py +513 -55
- brawny/api.py +39 -11
- brawny/assets/AGENTS.md +325 -0
- brawny/async_runtime.py +48 -0
- brawny/chain.py +3 -3
- brawny/cli/commands/__init__.py +2 -0
- brawny/cli/commands/console.py +69 -19
- brawny/cli/commands/contract.py +2 -2
- brawny/cli/commands/controls.py +121 -0
- brawny/cli/commands/health.py +2 -2
- brawny/cli/commands/job_dev.py +6 -5
- brawny/cli/commands/jobs.py +99 -2
- brawny/cli/commands/maintenance.py +13 -29
- brawny/cli/commands/migrate.py +1 -0
- brawny/cli/commands/run.py +10 -3
- brawny/cli/commands/script.py +8 -3
- brawny/cli/commands/signer.py +143 -26
- brawny/cli/helpers.py +0 -3
- brawny/cli_templates.py +25 -349
- brawny/config/__init__.py +4 -1
- brawny/config/models.py +43 -57
- brawny/config/parser.py +268 -57
- brawny/config/validation.py +52 -15
- brawny/daemon/context.py +4 -2
- brawny/daemon/core.py +185 -63
- brawny/daemon/loops.py +166 -98
- brawny/daemon/supervisor.py +261 -0
- brawny/db/__init__.py +14 -26
- brawny/db/base.py +248 -151
- brawny/db/global_cache.py +11 -1
- brawny/db/migrate.py +175 -28
- brawny/db/migrations/001_init.sql +4 -3
- brawny/db/migrations/010_add_nonce_gap_index.sql +1 -1
- brawny/db/migrations/011_add_job_logs.sql +1 -2
- brawny/db/migrations/012_add_claimed_by.sql +2 -2
- brawny/db/migrations/013_attempt_unique.sql +10 -0
- brawny/db/migrations/014_add_lease_expires_at.sql +5 -0
- brawny/db/migrations/015_add_signer_alias.sql +14 -0
- brawny/db/migrations/016_runtime_controls_and_quarantine.sql +32 -0
- brawny/db/migrations/017_add_job_drain.sql +6 -0
- brawny/db/migrations/018_add_nonce_reset_audit.sql +20 -0
- brawny/db/migrations/019_add_job_cooldowns.sql +8 -0
- brawny/db/migrations/020_attempt_unique_initial.sql +7 -0
- brawny/db/ops/__init__.py +3 -25
- brawny/db/ops/logs.py +1 -2
- brawny/db/queries.py +47 -91
- brawny/db/serialized.py +65 -0
- brawny/db/sqlite/__init__.py +1001 -0
- brawny/db/sqlite/connection.py +231 -0
- brawny/db/sqlite/execute.py +116 -0
- brawny/db/sqlite/mappers.py +190 -0
- brawny/db/sqlite/repos/attempts.py +372 -0
- brawny/db/sqlite/repos/block_state.py +102 -0
- brawny/db/sqlite/repos/cache.py +104 -0
- brawny/db/sqlite/repos/intents.py +1021 -0
- brawny/db/sqlite/repos/jobs.py +200 -0
- brawny/db/sqlite/repos/maintenance.py +182 -0
- brawny/db/sqlite/repos/signers_nonces.py +566 -0
- brawny/db/sqlite/tx.py +119 -0
- brawny/http.py +194 -0
- brawny/invariants.py +11 -24
- brawny/jobs/base.py +8 -0
- brawny/jobs/job_validation.py +2 -1
- brawny/keystore.py +83 -7
- brawny/lifecycle.py +64 -12
- brawny/logging.py +0 -2
- brawny/metrics.py +84 -12
- brawny/model/contexts.py +111 -9
- brawny/model/enums.py +1 -0
- brawny/model/errors.py +18 -0
- brawny/model/types.py +47 -131
- brawny/network_guard.py +133 -0
- brawny/networks/__init__.py +5 -5
- brawny/networks/config.py +1 -7
- brawny/networks/manager.py +14 -11
- brawny/runtime_controls.py +74 -0
- brawny/scheduler/poller.py +11 -7
- brawny/scheduler/reorg.py +95 -39
- brawny/scheduler/runner.py +442 -168
- brawny/scheduler/shutdown.py +3 -3
- brawny/script_tx.py +3 -3
- brawny/telegram.py +53 -7
- brawny/testing.py +1 -0
- brawny/timeout.py +38 -0
- brawny/tx/executor.py +922 -308
- brawny/tx/intent.py +54 -16
- brawny/tx/monitor.py +31 -12
- brawny/tx/nonce.py +212 -90
- brawny/tx/replacement.py +69 -18
- brawny/tx/retry_policy.py +24 -0
- brawny/tx/stages/types.py +75 -0
- brawny/types.py +18 -0
- brawny/utils.py +41 -0
- {brawny-0.1.13.dist-info → brawny-0.1.22.dist-info}/METADATA +3 -3
- brawny-0.1.22.dist-info/RECORD +163 -0
- brawny/_rpc/manager.py +0 -982
- brawny/_rpc/selector.py +0 -156
- brawny/db/base_new.py +0 -165
- brawny/db/mappers.py +0 -182
- brawny/db/migrations/008_add_transactions.sql +0 -72
- brawny/db/ops/attempts.py +0 -108
- brawny/db/ops/blocks.py +0 -83
- brawny/db/ops/cache.py +0 -93
- brawny/db/ops/intents.py +0 -296
- brawny/db/ops/jobs.py +0 -110
- brawny/db/ops/nonces.py +0 -322
- brawny/db/postgres.py +0 -2535
- brawny/db/postgres_new.py +0 -196
- brawny/db/sqlite.py +0 -2733
- brawny/db/sqlite_new.py +0 -191
- brawny-0.1.13.dist-info/RECORD +0 -141
- {brawny-0.1.13.dist-info → brawny-0.1.22.dist-info}/WHEEL +0 -0
- {brawny-0.1.13.dist-info → brawny-0.1.22.dist-info}/entry_points.txt +0 -0
- {brawny-0.1.13.dist-info → brawny-0.1.22.dist-info}/top_level.txt +0 -0
brawny/_rpc/retry.py
ADDED
@@ -0,0 +1,266 @@
+from __future__ import annotations
+
+import time
+import threading
+from typing import Any, Callable
+
+from cachetools import TTLCache
+
+from brawny._rpc.errors import (
+    RPCDeadlineExceeded,
+    RPCError,
+    RPCFatalError,
+    RPCPoolExhaustedError,
+    RpcErrorKind,
+)
+from brawny._rpc.retry_policy import RetryPolicy
+from brawny._rpc.pool import EndpointPool
+from brawny._rpc.caller import Caller
+from brawny._rpc.context import get_intent_budget_context, get_job_context
+from brawny.metrics import (
+    RPC_CALL_TIMEOUTS,
+    RPC_ERRORS,
+    RPC_FAILOVERS,
+    RPC_REQUESTS,
+    RPC_REQUESTS_BY_JOB,
+    RPC_REQUEST_SECONDS,
+    get_metrics,
+)
+from brawny.timeout import Deadline
+from brawny.logging import get_logger
+
+logger = get_logger(__name__)
+
+# High cardinality keys (intent budget keys): maxsize=10K, ttl=15min
+_unknown_budget_counts: TTLCache[str, int] = TTLCache(maxsize=10_000, ttl=900)
+_unknown_budget_lock = threading.Lock()
+_MAX_UNKNOWN_RETRIES_PER_INTENT = 2
+
+
+def _unknown_budget_exhausted(budget_key: str | None) -> bool:
+    if not budget_key:
+        return False
+    with _unknown_budget_lock:
+        current = _unknown_budget_counts.get(budget_key, 0)
+        if current >= _MAX_UNKNOWN_RETRIES_PER_INTENT:
+            return True
+        _unknown_budget_counts[budget_key] = current + 1
+        return False
+
+
+def call_with_retries(
+    pool: EndpointPool,
+    caller: Caller,
+    policy: RetryPolicy,
+    method: str,
+    args: tuple[Any, ...],
+    *,
+    timeout: float,
+    deadline: Deadline | None,
+    block_identifier: int | str,
+    chain_id: int | None,
+    request_id: str,
+    bound: bool,
+    attempt_event: str = "rpc.attempt",
+    allowed_hosts: frozenset[str] | None = None,
+    return_endpoint: bool = False,
+) -> Any:
+    endpoints = pool.order_endpoints()
+    attempts_to_try = min(policy.max_attempts, len(endpoints))
+    if attempts_to_try <= 0:
+        raise RPCPoolExhaustedError("No endpoints available for call", endpoints=[], last_error=None)
+
+    metrics = get_metrics()
+    last_error: Exception | None = None
+
+    for attempt, endpoint in enumerate(endpoints[:attempts_to_try], start=1):
+        if deadline is not None and deadline.expired():
+            metrics.counter(RPC_CALL_TIMEOUTS).inc(
+                chain_id=chain_id,
+                method=method,
+                rpc_category=_rpc_category(method),
+                rpc_host=_rpc_host(endpoint, allowed_hosts),
+            )
+            raise RPCDeadlineExceeded(
+                "RPC deadline exhausted before call",
+                code="deadline_exceeded",
+                method=method,
+                endpoint=endpoint,
+            )
+
+        effective_timeout = timeout
+        if deadline is not None:
+            remaining = deadline.remaining()
+            if remaining <= 0:
+                metrics.counter(RPC_CALL_TIMEOUTS).inc(
+                    chain_id=chain_id,
+                    method=method,
+                    rpc_category=_rpc_category(method),
+                    rpc_host=_rpc_host(endpoint, allowed_hosts),
+                )
+                raise RPCDeadlineExceeded(
+                    "RPC deadline exhausted before call",
+                    code="deadline_exceeded",
+                    method=method,
+                    endpoint=endpoint,
+                )
+            effective_timeout = min(timeout, remaining)
+
+        metrics.counter(RPC_REQUESTS).inc(
+            chain_id=chain_id,
+            method=method,
+            rpc_category=_rpc_category(method),
+            rpc_host=_rpc_host(endpoint, allowed_hosts),
+        )
+        job_id = get_job_context()
+        if job_id:
+            metrics.counter(RPC_REQUESTS_BY_JOB).inc(
+                chain_id=chain_id,
+                job_id=job_id,
+                rpc_category=_rpc_category(method),
+            )
+
+        start_time = time.time()
+        try:
+            result = caller.call(
+                endpoint,
+                method,
+                args,
+                timeout=effective_timeout,
+                deadline=deadline,
+                block_identifier=block_identifier,
+            )
+            latency = time.time() - start_time
+            metrics.histogram(RPC_REQUEST_SECONDS).observe(
+                latency,
+                chain_id=chain_id,
+                method=method,
+                rpc_category=_rpc_category(method),
+                rpc_host=_rpc_host(endpoint, allowed_hosts),
+            )
+            log_fields = {
+                "chain_id": chain_id,
+                "endpoint": _safe_endpoint_label(endpoint),
+                "request_id": request_id,
+                "method": method,
+                "attempt": attempt,
+                "policy_name": policy.name,
+                "bound": bound,
+                "error_class": None,
+            }
+            if job_id:
+                log_fields["job_id"] = job_id
+            logger.info(attempt_event, **log_fields)
+            if return_endpoint:
+                return result, endpoint
+            return result
+        except RPCError as exc:
+            latency = time.time() - start_time
+            metrics.histogram(RPC_REQUEST_SECONDS).observe(
+                latency,
+                chain_id=chain_id,
+                method=method,
+                rpc_category=_rpc_category(method),
+                rpc_host=_rpc_host(endpoint, allowed_hosts),
+            )
+
+            error_class = type(exc)
+            should_retry = isinstance(exc, policy.retryable_error_classes)
+            failover_ok = getattr(exc, "failover_ok", True)
+            error_kind = getattr(exc, "classification_kind", None)
+            if error_kind == RpcErrorKind.UNKNOWN and _unknown_budget_exhausted(
+                get_intent_budget_context()
+            ):
+                logger.error(
+                    "rpc.unknown_budget_exhausted",
+                    budget_key=get_intent_budget_context(),
+                    method=method,
+                    endpoint=_safe_endpoint_label(endpoint),
+                )
+                raise RPCFatalError(
+                    "unknown_budget_exhausted",
+                    code="unknown_budget_exhausted",
+                    method=method,
+                    endpoint=endpoint,
+                ) from exc
+
+            log_fields = {
+                "chain_id": chain_id,
+                "endpoint": _safe_endpoint_label(endpoint),
+                "request_id": request_id,
+                "method": method,
+                "attempt": attempt,
+                "policy_name": policy.name,
+                "bound": bound,
+                "error_class": error_class.__name__,
+            }
+            if job_id:
+                log_fields["job_id"] = job_id
+            logger.info(attempt_event, **log_fields)
+
+            if not should_retry or not failover_ok:
+                raise
+
+            metrics.counter(RPC_ERRORS).inc(
+                chain_id=chain_id,
+                method=method,
+                rpc_category=_rpc_category(method),
+                rpc_host=_rpc_host(endpoint, allowed_hosts),
+            )
+            last_error = exc
+            if attempt < attempts_to_try:
+                metrics.counter(RPC_FAILOVERS).inc(chain_id=chain_id, method=method)
+                backoff = policy.backoff_seconds(attempt)
+                if backoff > 0:
+                    time.sleep(backoff)
+            continue
+        except Exception as exc:  # noqa: BLE001 - unexpected bug
+            log_fields = {
+                "chain_id": chain_id,
+                "endpoint": _safe_endpoint_label(endpoint),
+                "request_id": request_id,
+                "method": method,
+                "attempt": attempt,
+                "policy_name": policy.name,
+                "bound": bound,
+                "error_class": type(exc).__name__,
+                "exc_info": True,
+            }
+            if job_id:
+                log_fields["job_id"] = job_id
+            logger.error(attempt_event, **log_fields)
+            raise
+
+    raise RPCPoolExhaustedError(
+        f"All {attempts_to_try} attempts failed",
+        endpoints=endpoints[:attempts_to_try],
+        last_error=last_error,
+    )
+
+
+def _rpc_category(method: str) -> str:
+    return "broadcast" if method in {"eth_sendRawTransaction", "eth_sendTransaction"} else "read"
+
+
+def _rpc_host(url: str, allowed_hosts: frozenset[str] | None = None) -> str:
+    try:
+        split = url.split("://", 1)[1]
+    except IndexError:
+        return "unknown"
+    host = split.split("/", 1)[0]
+    host = host.split("@", 1)[-1]
+    host = host.split(":", 1)[0]
+    if allowed_hosts is not None and host not in allowed_hosts:
+        return "other"
+    return host or "unknown"
+
+
+def _safe_endpoint_label(url: str) -> str:
+    parts = url.split("://", 1)
+    if len(parts) == 2:
+        scheme, rest = parts
+    else:
+        scheme, rest = "http", parts[0]
+    host = rest.split("/", 1)[0]
+    host = host.split("@", 1)[-1]
+    return f"{scheme}://{host}"
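The retry loop above caps how many UNKNOWN-classified RPC errors may trigger a retry for a single intent before the call is escalated to RPCFatalError. A minimal sketch of that budget behavior, assuming the module is importable as `brawny._rpc.retry` and using a made-up budget key ("intent-123"); in the daemon the key comes from `get_intent_budget_context()`:

```python
# Sketch only: exercises the per-intent UNKNOWN-error budget added in retry.py.
# "intent-123" is a hypothetical budget key for illustration.
from brawny._rpc.retry import _unknown_budget_exhausted

key = "intent-123"
print(_unknown_budget_exhausted(key))   # False - first unknown error, 1/2 of the budget used
print(_unknown_budget_exhausted(key))   # False - second unknown error, 2/2 used
print(_unknown_budget_exhausted(key))   # True  - budget exhausted; call_with_retries raises RPCFatalError
print(_unknown_budget_exhausted(None))  # False - no budget key means no cap is applied
```

Counts are kept in a TTLCache (10,000 keys, 15-minute TTL), so the budget resets on its own for quiet intents.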
brawny/_rpc/retry_policy.py
ADDED
@@ -0,0 +1,81 @@
+from __future__ import annotations
+
+import random
+from dataclasses import dataclass
+from typing import Iterable, Type
+
+from brawny._rpc.errors import RPCTransient, RPCRateLimited
+
+
+@dataclass(frozen=True)
+class RetryPolicy:
+    """Simple retry policy for RPC calls.
+
+    attempt is 1-based (first attempt is 1).
+    """
+
+    name: str
+    max_attempts: int
+    base_backoff_seconds: float
+    max_backoff_seconds: float
+    jitter: bool
+    retryable_error_classes: tuple[type[BaseException], ...] = (RPCTransient, RPCRateLimited)
+
+    def should_retry(self, error_class: type[BaseException]) -> bool:
+        return issubclass(error_class, self.retryable_error_classes)
+
+    def backoff_seconds(self, attempt: int, *, rng: random.Random | None = None) -> float:
+        if attempt <= 0:
+            return 0.0
+        backoff = self.base_backoff_seconds * (2 ** (attempt - 1))
+        backoff = min(backoff, self.max_backoff_seconds)
+        if not self.jitter:
+            return backoff
+        rng = rng or random
+        jitter = rng.uniform(0.0, min(backoff * 0.1, max(self.max_backoff_seconds - backoff, 0.0)))
+        return min(backoff + jitter, self.max_backoff_seconds)
+
+
+def _default_max_backoff(max_attempts: int, base_backoff_seconds: float) -> float:
+    if max_attempts <= 0:
+        return base_backoff_seconds
+    return base_backoff_seconds * (2 ** (max_attempts - 1))
+
+
+def policy_from_values(
+    name: str,
+    *,
+    max_attempts: int,
+    base_backoff_seconds: float,
+    max_backoff_seconds: float | None = None,
+    jitter: bool = False,
+    retryable_error_classes: Iterable[type[BaseException]] | None = None,
+) -> RetryPolicy:
+    if max_backoff_seconds is None:
+        max_backoff_seconds = _default_max_backoff(max_attempts, base_backoff_seconds)
+    if retryable_error_classes is None:
+        retryable_error_classes = (RPCTransient, RPCRateLimited)
+    return RetryPolicy(
+        name=name,
+        max_attempts=max_attempts,
+        base_backoff_seconds=base_backoff_seconds,
+        max_backoff_seconds=max_backoff_seconds,
+        jitter=jitter,
+        retryable_error_classes=tuple(retryable_error_classes),
+    )
+
+
+def fast_read_policy(config) -> RetryPolicy:
+    return policy_from_values(
+        "FAST_READ",
+        max_attempts=config.rpc_max_retries,
+        base_backoff_seconds=config.rpc_retry_backoff_base,
+    )
+
+
+def broadcast_policy(config) -> RetryPolicy:
+    return policy_from_values(
+        "BROADCAST",
+        max_attempts=config.rpc_max_retries,
+        base_backoff_seconds=config.rpc_retry_backoff_base,
+    )
brawny/accounts.py
CHANGED
@@ -9,9 +9,11 @@ Usage:
     # Load by name (prompts for password if needed)
     acct = accounts.load("my_wallet")
 
-    # Add new account (
-
-    acct =
+    # Add new account (returns GeneratedAccount, includes mnemonic if generated)
+    gen = accounts.add()         # Generates new key
+    acct = gen.account
+    gen = accounts.add("0x...")  # From private key
+    acct = gen.account
 
     # Save to keystore
     acct.save("my_wallet")
@@ -33,6 +35,7 @@ import json
 import os
 import sys
 from pathlib import Path
+from dataclasses import dataclass
 from typing import TYPE_CHECKING, Any, Iterator
 
 from eth_account import Account as EthAccount
@@ -42,6 +45,15 @@ if TYPE_CHECKING:
     from brawny.jobs.base import TxReceipt
 
 _accounts: "Accounts | None" = None
+_hdwallet_enabled = False
+
+
+def _ensure_hdwallet_enabled() -> None:
+    global _hdwallet_enabled
+    if _hdwallet_enabled:
+        return
+    EthAccount.enable_unaudited_hdwallet_features()
+    _hdwallet_enabled = True
 
 
 def _get_accounts_dir() -> Path:
@@ -241,6 +253,12 @@
         return hash(self._address.lower())
 
 
+@dataclass(frozen=True)
+class GeneratedAccount:
+    account: Account
+    mnemonic: str | None = None
+
+
 class Accounts:
     """Container for available signing accounts.
 
@@ -335,7 +353,7 @@
 
         return self._register(account)
 
-    def add(self, private_key: str | bytes | None = None) ->
+    def add(self, private_key: str | bytes | None = None) -> GeneratedAccount:
         """Add account from private key or generate new one.
 
         Args:
@@ -343,17 +361,18 @@
                 If None, generates new account with mnemonic.
 
         Returns:
-
+            GeneratedAccount with mnemonic if generated
         """
         if private_key is None:
             # Generate new account with mnemonic
+            _ensure_hdwallet_enabled()
             mnemonic = generate_mnemonic(num_words=12, lang="english")
-            print(f"mnemonic: '{mnemonic}'")
             eth_acct = EthAccount.from_mnemonic(mnemonic)
             account = Account(
                 address=eth_acct.address,
                 private_key=eth_acct.key,
             )
+            return GeneratedAccount(account=self._register(account), mnemonic=mnemonic)
         else:
             # From provided key
             if isinstance(private_key, str):
@@ -364,8 +383,7 @@
                 address=eth_acct.address,
                 private_key=eth_acct.key,
             )
-
-        return self._register(account)
+            return GeneratedAccount(account=self._register(account), mnemonic=None)
 
     def from_mnemonic(
         self,
@@ -386,6 +404,7 @@
             Single Account if count=1, else list of Accounts
         """
        results = []
+        _ensure_hdwallet_enabled()
         for i in range(offset, offset + count):
             # Standard Ethereum derivation path
             path = f"m/44'/60'/0'/0/{i}"
@@ -497,7 +516,7 @@ class _AccountsProxy:
     def load(self, filename: str, password: str | None = None) -> Account:
         return _get_accounts().load(filename, password)
 
-    def add(self, private_key: str | bytes | None = None) ->
+    def add(self, private_key: str | bytes | None = None) -> GeneratedAccount:
         return _get_accounts().add(private_key)
 
     def from_mnemonic(
brawny/alerts/__init__.py
CHANGED
@@ -1,15 +1,16 @@
 """Alerts extension with contract handles, ABI resolution, and event decoding.
 
 This extension provides an ergonomic interface for job authors to:
--
+- Send alerts from lifecycle hooks via ctx.alert()
+- Interact with contracts in hooks
 - Decode events from transaction receipts (brownie-compatible)
 - Make contract reads
 - Format messages with explorer links
 
 Key components:
--
+- SuccessContext: Context passed to on_success with receipt + events
+- FailureContext: Context passed to on_failure with error info
 - ContractHandle: Interface for contract function calls
-- EventDict: Brownie-compatible event container
 - ABIResolver: Automatic ABI resolution with caching
 
 Formatting helpers (Markdown is the default):
@@ -17,27 +18,23 @@ Formatting helpers (Markdown is the default):
 - explorer_link(hash): "[🔗 View on Explorer](url)"
 - escape_markdown_v2(text): Escapes special characters
 
-Usage in
+Usage in lifecycle hooks:
 
-    from brawny import Contract
     from brawny.alerts import shorten, explorer_link
+    from brawny.model.contexts import SuccessContext
 
-    def
-        # Get contract handle (brownie-style)
-        token = Contract("token")
-
+    def on_success(self, ctx: SuccessContext) -> None:
         # Decode events from receipt (brownie-compatible)
-
-
-
-        # Make contract reads
-        symbol = token.symbol()
-        decimals = token.decimals()
+        if ctx.events:
+            deposit = ctx.events[0]  # First decoded event
+            amount = deposit["assets"]
 
-
-
+            # Format with explorer links
+            tx_link = explorer_link(ctx.receipt.transaction_hash)
 
-
+            ctx.alert(f"Deposited {amount}\\n{tx_link}")
+        else:
+            ctx.alert(f"Confirmed: {shorten(ctx.receipt.transaction_hash)}")
 """
 
 from brawny.alerts.context import AlertContext, JobMetadata
|