brawny 0.1.13__py3-none-any.whl → 0.1.22__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to their public registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the registry.
- brawny/__init__.py +2 -0
- brawny/_context.py +5 -5
- brawny/_rpc/__init__.py +36 -12
- brawny/_rpc/broadcast.py +14 -13
- brawny/_rpc/caller.py +243 -0
- brawny/_rpc/client.py +539 -0
- brawny/_rpc/clients.py +11 -11
- brawny/_rpc/context.py +23 -0
- brawny/_rpc/errors.py +465 -31
- brawny/_rpc/gas.py +7 -6
- brawny/_rpc/pool.py +18 -0
- brawny/_rpc/retry.py +266 -0
- brawny/_rpc/retry_policy.py +81 -0
- brawny/accounts.py +28 -9
- brawny/alerts/__init__.py +15 -18
- brawny/alerts/abi_resolver.py +212 -36
- brawny/alerts/base.py +2 -2
- brawny/alerts/contracts.py +77 -10
- brawny/alerts/errors.py +30 -3
- brawny/alerts/events.py +38 -5
- brawny/alerts/health.py +19 -13
- brawny/alerts/send.py +513 -55
- brawny/api.py +39 -11
- brawny/assets/AGENTS.md +325 -0
- brawny/async_runtime.py +48 -0
- brawny/chain.py +3 -3
- brawny/cli/commands/__init__.py +2 -0
- brawny/cli/commands/console.py +69 -19
- brawny/cli/commands/contract.py +2 -2
- brawny/cli/commands/controls.py +121 -0
- brawny/cli/commands/health.py +2 -2
- brawny/cli/commands/job_dev.py +6 -5
- brawny/cli/commands/jobs.py +99 -2
- brawny/cli/commands/maintenance.py +13 -29
- brawny/cli/commands/migrate.py +1 -0
- brawny/cli/commands/run.py +10 -3
- brawny/cli/commands/script.py +8 -3
- brawny/cli/commands/signer.py +143 -26
- brawny/cli/helpers.py +0 -3
- brawny/cli_templates.py +25 -349
- brawny/config/__init__.py +4 -1
- brawny/config/models.py +43 -57
- brawny/config/parser.py +268 -57
- brawny/config/validation.py +52 -15
- brawny/daemon/context.py +4 -2
- brawny/daemon/core.py +185 -63
- brawny/daemon/loops.py +166 -98
- brawny/daemon/supervisor.py +261 -0
- brawny/db/__init__.py +14 -26
- brawny/db/base.py +248 -151
- brawny/db/global_cache.py +11 -1
- brawny/db/migrate.py +175 -28
- brawny/db/migrations/001_init.sql +4 -3
- brawny/db/migrations/010_add_nonce_gap_index.sql +1 -1
- brawny/db/migrations/011_add_job_logs.sql +1 -2
- brawny/db/migrations/012_add_claimed_by.sql +2 -2
- brawny/db/migrations/013_attempt_unique.sql +10 -0
- brawny/db/migrations/014_add_lease_expires_at.sql +5 -0
- brawny/db/migrations/015_add_signer_alias.sql +14 -0
- brawny/db/migrations/016_runtime_controls_and_quarantine.sql +32 -0
- brawny/db/migrations/017_add_job_drain.sql +6 -0
- brawny/db/migrations/018_add_nonce_reset_audit.sql +20 -0
- brawny/db/migrations/019_add_job_cooldowns.sql +8 -0
- brawny/db/migrations/020_attempt_unique_initial.sql +7 -0
- brawny/db/ops/__init__.py +3 -25
- brawny/db/ops/logs.py +1 -2
- brawny/db/queries.py +47 -91
- brawny/db/serialized.py +65 -0
- brawny/db/sqlite/__init__.py +1001 -0
- brawny/db/sqlite/connection.py +231 -0
- brawny/db/sqlite/execute.py +116 -0
- brawny/db/sqlite/mappers.py +190 -0
- brawny/db/sqlite/repos/attempts.py +372 -0
- brawny/db/sqlite/repos/block_state.py +102 -0
- brawny/db/sqlite/repos/cache.py +104 -0
- brawny/db/sqlite/repos/intents.py +1021 -0
- brawny/db/sqlite/repos/jobs.py +200 -0
- brawny/db/sqlite/repos/maintenance.py +182 -0
- brawny/db/sqlite/repos/signers_nonces.py +566 -0
- brawny/db/sqlite/tx.py +119 -0
- brawny/http.py +194 -0
- brawny/invariants.py +11 -24
- brawny/jobs/base.py +8 -0
- brawny/jobs/job_validation.py +2 -1
- brawny/keystore.py +83 -7
- brawny/lifecycle.py +64 -12
- brawny/logging.py +0 -2
- brawny/metrics.py +84 -12
- brawny/model/contexts.py +111 -9
- brawny/model/enums.py +1 -0
- brawny/model/errors.py +18 -0
- brawny/model/types.py +47 -131
- brawny/network_guard.py +133 -0
- brawny/networks/__init__.py +5 -5
- brawny/networks/config.py +1 -7
- brawny/networks/manager.py +14 -11
- brawny/runtime_controls.py +74 -0
- brawny/scheduler/poller.py +11 -7
- brawny/scheduler/reorg.py +95 -39
- brawny/scheduler/runner.py +442 -168
- brawny/scheduler/shutdown.py +3 -3
- brawny/script_tx.py +3 -3
- brawny/telegram.py +53 -7
- brawny/testing.py +1 -0
- brawny/timeout.py +38 -0
- brawny/tx/executor.py +922 -308
- brawny/tx/intent.py +54 -16
- brawny/tx/monitor.py +31 -12
- brawny/tx/nonce.py +212 -90
- brawny/tx/replacement.py +69 -18
- brawny/tx/retry_policy.py +24 -0
- brawny/tx/stages/types.py +75 -0
- brawny/types.py +18 -0
- brawny/utils.py +41 -0
- {brawny-0.1.13.dist-info → brawny-0.1.22.dist-info}/METADATA +3 -3
- brawny-0.1.22.dist-info/RECORD +163 -0
- brawny/_rpc/manager.py +0 -982
- brawny/_rpc/selector.py +0 -156
- brawny/db/base_new.py +0 -165
- brawny/db/mappers.py +0 -182
- brawny/db/migrations/008_add_transactions.sql +0 -72
- brawny/db/ops/attempts.py +0 -108
- brawny/db/ops/blocks.py +0 -83
- brawny/db/ops/cache.py +0 -93
- brawny/db/ops/intents.py +0 -296
- brawny/db/ops/jobs.py +0 -110
- brawny/db/ops/nonces.py +0 -322
- brawny/db/postgres.py +0 -2535
- brawny/db/postgres_new.py +0 -196
- brawny/db/sqlite.py +0 -2733
- brawny/db/sqlite_new.py +0 -191
- brawny-0.1.13.dist-info/RECORD +0 -141
- {brawny-0.1.13.dist-info → brawny-0.1.22.dist-info}/WHEEL +0 -0
- {brawny-0.1.13.dist-info → brawny-0.1.22.dist-info}/entry_points.txt +0 -0
- {brawny-0.1.13.dist-info → brawny-0.1.22.dist-info}/top_level.txt +0 -0
brawny/_rpc/selector.py
DELETED
@@ -1,156 +0,0 @@
"""Endpoint selection with health-aware ordering.

This module extracts endpoint health tracking and selection from RPCManager,
following OE6's separation of concerns.

INVARIANT: order_endpoints() always returns ALL endpoints, just ordered.
Unhealthy endpoints are moved to the end, not removed. This ensures
recovered endpoints eventually get tried again.
"""

from __future__ import annotations

import time
from dataclasses import dataclass, field


@dataclass
class EndpointHealth:
    """Health tracking for a single RPC endpoint."""

    url: str
    consecutive_failures: int = 0
    last_success_ts: float | None = None
    last_failure_ts: float | None = None
    latency_ewma_ms: float = 100.0  # Start with reasonable default

    # EWMA smoothing factor (0.3 = 30% weight to new samples, more responsive than old 0.1)
    EWMA_ALPHA: float = 0.3

    @property
    def is_healthy(self) -> bool:
        """Check if endpoint is currently healthy (below failure threshold)."""
        # Threshold is managed by EndpointSelector
        return True  # Selector determines health based on threshold

    def record_success(self, latency_ms: float) -> None:
        """Record a successful RPC call.

        Args:
            latency_ms: Request latency in milliseconds
        """
        self.consecutive_failures = 0
        self.last_success_ts = time.time()
        # EWMA update
        self.latency_ewma_ms = (
            self.EWMA_ALPHA * latency_ms + (1 - self.EWMA_ALPHA) * self.latency_ewma_ms
        )

    def record_failure(self) -> None:
        """Record a failed RPC call (transport-class failures only)."""
        self.consecutive_failures += 1
        self.last_failure_ts = time.time()


class EndpointSelector:
    """Health-aware endpoint selection.

    CONSTRAINTS (to prevent scope creep):
    - Only track consecutive failures + EWMA latency
    - No background probing
    - No partial circuit breaker logic
    - No complex health scoring

    INVARIANT: order_endpoints() always returns ALL endpoints, just ordered.
    Unhealthy endpoints are moved to the end, not removed. This ensures
    recovered endpoints eventually get tried again.
    """

    def __init__(
        self,
        endpoints: list[str],
        failure_threshold: int = 3,
    ) -> None:
        """Initialize endpoint selector.

        Args:
            endpoints: List of endpoint URLs
            failure_threshold: Consecutive failures before endpoint is unhealthy
        """
        if not endpoints:
            raise ValueError("At least one endpoint is required")

        self._endpoints = [EndpointHealth(url=url.strip()) for url in endpoints if url.strip()]
        if not self._endpoints:
            raise ValueError("At least one non-empty endpoint is required")

        self._failure_threshold = failure_threshold
        self._endpoint_map: dict[str, EndpointHealth] = {e.url: e for e in self._endpoints}

    @property
    def endpoints(self) -> list[EndpointHealth]:
        """Get all endpoint health objects."""
        return self._endpoints

    def get_endpoint(self, url: str) -> EndpointHealth | None:
        """Get endpoint health by URL."""
        return self._endpoint_map.get(url)

    def is_healthy(self, endpoint: EndpointHealth) -> bool:
        """Check if an endpoint is healthy (below failure threshold)."""
        return endpoint.consecutive_failures < self._failure_threshold

    def has_healthy_endpoint(self) -> bool:
        """Check if any endpoint is healthy."""
        return any(self.is_healthy(e) for e in self._endpoints)

    def order_endpoints(self) -> list[EndpointHealth]:
        """Return ALL endpoints ordered by health, preserving position priority.

        Ordering:
        1. Healthy endpoints in original order (first = primary)
        2. Unhealthy endpoints in original order

        Position-based: First healthy endpoint in user config is always preferred.
        """
        healthy = [e for e in self._endpoints if self.is_healthy(e)]
        unhealthy = [e for e in self._endpoints if not self.is_healthy(e)]
        return healthy + unhealthy

    def get_active_endpoint(self) -> EndpointHealth:
        """Get the preferred endpoint (healthiest first).

        Returns first healthy endpoint. If no healthy endpoints,
        returns least recently failed.

        Recovery: When an endpoint's consecutive_failures resets to 0 via
        record_success(), it becomes healthy and can be returned again.
        """
        ordered = self.order_endpoints()
        if ordered:
            return ordered[0]
        # Fallback (should not happen if endpoints exist)
        return self._endpoints[0]

    def record_success(self, url: str, latency_ms: float) -> None:
        """Record successful call for an endpoint.

        Args:
            url: Endpoint URL
            latency_ms: Request latency in milliseconds
        """
        endpoint = self._endpoint_map.get(url)
        if endpoint:
            endpoint.record_success(latency_ms)

    def record_failure(self, url: str) -> None:
        """Record failed call for an endpoint (transport-class failures only).

        Only call this for RPCRetryableError, not for Fatal/Recoverable errors.

        Args:
            url: Endpoint URL
        """
        endpoint = self._endpoint_map.get(url)
        if endpoint:
            endpoint.record_failure()
brawny/db/base_new.py
DELETED
@@ -1,165 +0,0 @@
"""Slim database interface for brawny.

Provides 4 execution primitives + transaction + connect/close.
All domain operations live in db/ops/ modules.
"""

from __future__ import annotations

from abc import ABC, abstractmethod
from contextlib import contextmanager
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Iterator, Literal


Dialect = Literal["postgres", "sqlite"]
IsolationLevel = Literal["SERIALIZABLE", "READ COMMITTED", "REPEATABLE READ"]


@dataclass
class BlockState:
    """Block processing state."""

    chain_id: int
    last_processed_block_number: int
    last_processed_block_hash: str
    created_at: datetime
    updated_at: datetime


@dataclass
class BlockHashEntry:
    """Block hash history entry for reorg detection."""

    id: int
    chain_id: int
    block_number: int
    block_hash: str
    inserted_at: datetime


@dataclass
class ABICacheEntry:
    """Cached ABI entry."""

    chain_id: int
    address: str
    abi_json: str
    source: str
    resolved_at: datetime


@dataclass
class ProxyCacheEntry:
    """Cached proxy resolution entry."""

    chain_id: int
    proxy_address: str
    implementation_address: str
    resolved_at: datetime


class Database(ABC):
    """Database interface with 4 execution primitives.

    Implementations provide connection management and query execution.
    SQL queries are in db/queries.py, row mapping in db/mappers.py.
    """

    @property
    @abstractmethod
    def dialect(self) -> Dialect:
        """Return dialect name for query selection."""
        ...

    @abstractmethod
    def connect(self) -> None:
        """Establish database connection."""
        ...

    @abstractmethod
    def close(self) -> None:
        """Close database connection and cleanup resources."""
        ...

    @abstractmethod
    def is_connected(self) -> bool:
        """Check if database is connected."""
        ...

    @abstractmethod
    @contextmanager
    def transaction(
        self, isolation_level: IsolationLevel | None = None
    ) -> Iterator[None]:
        """Context manager for database transactions.

        Args:
            isolation_level: Optional isolation level (Postgres only, ignored on SQLite)

        Usage:
            with db.transaction():
                ops.intents.create_intent(db, ...)
                ops.nonces.reserve_nonce(db, ...)

            # For atomic nonce reservation on Postgres
            with db.transaction(isolation_level="SERIALIZABLE"):
                ...
        """
        ...

    @abstractmethod
    def execute(self, query: str, params: dict[str, Any] | None = None) -> None:
        """Execute a query without returning results.

        Args:
            query: SQL with :name placeholders
            params: Dict of parameter values
        """
        ...

    @abstractmethod
    def fetch_one(
        self, query: str, params: dict[str, Any] | None = None
    ) -> dict[str, Any] | None:
        """Execute a query and return single result or None.

        Args:
            query: SQL with :name placeholders
            params: Dict of parameter values

        Returns:
            Single row as dict, or None if no results
        """
        ...

    @abstractmethod
    def fetch_all(
        self, query: str, params: dict[str, Any] | None = None
    ) -> list[dict[str, Any]]:
        """Execute a query and return all results.

        Args:
            query: SQL with :name placeholders
            params: Dict of parameter values

        Returns:
            List of rows as dicts
        """
        ...

    @abstractmethod
    def execute_rowcount(
        self, query: str, params: dict[str, Any] | None = None
    ) -> int:
        """Execute a query and return affected row count.

        Args:
            query: SQL with :name placeholders
            params: Dict of parameter values

        Returns:
            Number of rows affected
        """
        ...
brawny/db/mappers.py
DELETED
@@ -1,182 +0,0 @@
"""Row to model mappers for database results.

Centralized conversion from database rows (dicts) to domain models.
"""

from __future__ import annotations

import json
from typing import Any
from uuid import UUID

from brawny.db.base_new import BlockState, BlockHashEntry, ABICacheEntry, ProxyCacheEntry
from brawny.model.types import (
    JobConfig,
    TxIntent,
    TxAttempt,
    SignerState,
    NonceReservation,
    GasParams,
)
from brawny.model.enums import IntentStatus, AttemptStatus, NonceStatus


def row_to_block_state(row: dict[str, Any]) -> BlockState:
    """Convert database row to BlockState."""
    return BlockState(
        chain_id=row["chain_id"],
        last_processed_block_number=row["last_processed_block_number"],
        last_processed_block_hash=row["last_processed_block_hash"],
        created_at=row["created_at"],
        updated_at=row["updated_at"],
    )


def row_to_block_hash_entry(row: dict[str, Any]) -> BlockHashEntry:
    """Convert database row to BlockHashEntry."""
    return BlockHashEntry(
        id=row["id"],
        chain_id=row["chain_id"],
        block_number=row["block_number"],
        block_hash=row["block_hash"],
        inserted_at=row["inserted_at"],
    )


def row_to_job_config(row: dict[str, Any]) -> JobConfig:
    """Convert database row to JobConfig."""
    return JobConfig(
        job_id=row["job_id"],
        job_name=row["job_name"],
        enabled=bool(row["enabled"]),
        check_interval_blocks=row["check_interval_blocks"],
        last_checked_block_number=row["last_checked_block_number"],
        last_triggered_block_number=row["last_triggered_block_number"],
        created_at=row["created_at"],
        updated_at=row["updated_at"],
    )


def row_to_signer_state(row: dict[str, Any]) -> SignerState:
    """Convert database row to SignerState."""
    return SignerState(
        chain_id=row["chain_id"],
        signer_address=row["signer_address"],
        next_nonce=row["next_nonce"],
        last_synced_chain_nonce=row["last_synced_chain_nonce"],
        created_at=row["created_at"],
        updated_at=row["updated_at"],
        gap_started_at=row.get("gap_started_at"),
        alias=row.get("alias"),
    )


def row_to_nonce_reservation(row: dict[str, Any]) -> NonceReservation:
    """Convert database row to NonceReservation."""
    intent_id = row["intent_id"]
    # Handle string UUIDs (SQLite stores as string)
    if intent_id and isinstance(intent_id, str):
        intent_id = UUID(intent_id)
    return NonceReservation(
        id=row["id"],
        chain_id=row["chain_id"],
        signer_address=row["signer_address"],
        nonce=row["nonce"],
        status=NonceStatus(row["status"]),
        intent_id=intent_id,
        created_at=row["created_at"],
        updated_at=row["updated_at"],
    )


def row_to_intent(row: dict[str, Any]) -> TxIntent:
    """Convert database row to TxIntent."""
    intent_id = row["intent_id"]
    # Handle string UUIDs (SQLite stores as string)
    if isinstance(intent_id, str):
        intent_id = UUID(intent_id)

    # Parse metadata_json once at DB boundary
    metadata_json = row.get("metadata_json")
    metadata = json.loads(metadata_json) if metadata_json else {}

    return TxIntent(
        intent_id=intent_id,
        job_id=row["job_id"],
        chain_id=row["chain_id"],
        signer_address=row["signer_address"],
        idempotency_key=row["idempotency_key"],
        to_address=row["to_address"],
        data=row["data"],
        value_wei=row["value_wei"],
        gas_limit=row["gas_limit"],
        max_fee_per_gas=row["max_fee_per_gas"],
        max_priority_fee_per_gas=row["max_priority_fee_per_gas"],
        min_confirmations=row["min_confirmations"],
        deadline_ts=row["deadline_ts"],
        retry_after=row.get("retry_after"),
        retry_count=row.get("retry_count", 0),
        status=IntentStatus(row["status"]),
        claim_token=row["claim_token"],
        claimed_at=row["claimed_at"],
        created_at=row["created_at"],
        updated_at=row["updated_at"],
        broadcast_group=row.get("broadcast_group"),
        broadcast_endpoints_json=row.get("broadcast_endpoints_json"),
        metadata=metadata,
    )


def row_to_attempt(row: dict[str, Any]) -> TxAttempt:
    """Convert database row to TxAttempt."""
    attempt_id = row["attempt_id"]
    intent_id = row["intent_id"]
    replaces_attempt_id = row.get("replaces_attempt_id")

    # Handle string UUIDs (SQLite stores as string)
    if isinstance(attempt_id, str):
        attempt_id = UUID(attempt_id)
    if isinstance(intent_id, str):
        intent_id = UUID(intent_id)
    if replaces_attempt_id and isinstance(replaces_attempt_id, str):
        replaces_attempt_id = UUID(replaces_attempt_id)

    return TxAttempt(
        attempt_id=attempt_id,
        intent_id=intent_id,
        nonce=row["nonce"],
        tx_hash=row["tx_hash"],
        gas_params=GasParams.from_json(row["gas_params_json"]),
        status=AttemptStatus(row["status"]),
        error_code=row.get("error_code"),
        error_detail=row.get("error_detail"),
        replaces_attempt_id=replaces_attempt_id,
        broadcast_block=row.get("broadcast_block"),
        broadcast_at=row.get("broadcast_at"),
        included_block=row.get("included_block"),
        created_at=row["created_at"],
        updated_at=row["updated_at"],
        broadcast_group=row.get("broadcast_group"),
        endpoint_url=row.get("endpoint_url"),
    )


def row_to_abi_cache(row: dict[str, Any]) -> ABICacheEntry:
    """Convert database row to ABICacheEntry."""
    return ABICacheEntry(
        chain_id=row["chain_id"],
        address=row["address"],
        abi_json=row["abi_json"],
        source=row["source"],
        resolved_at=row["resolved_at"],
    )


def row_to_proxy_cache(row: dict[str, Any]) -> ProxyCacheEntry:
    """Convert database row to ProxyCacheEntry."""
    return ProxyCacheEntry(
        chain_id=row["chain_id"],
        proxy_address=row["proxy_address"],
        implementation_address=row["implementation_address"],
        resolved_at=row["resolved_at"],
    )
brawny/db/migrations/008_add_transactions.sql
DELETED
@@ -1,72 +0,0 @@
-- brawny transactions table migration
-- Version: 008
-- Description: Add single Transaction model replacing TxIntent + TxAttempt
--
-- This is Phase 1 of the ATTEMPT_MODEL.md simplification:
-- - Single transactions table (no joins)
-- - 4-state TxStatus (created, broadcast, confirmed, failed)
-- - JSON blobs for rarely-queried fields
-- - Append-only tx_hash_history for debugging

-- ============================================================================
-- Transactions - single model for job transaction lifecycle
-- ============================================================================
CREATE TABLE IF NOT EXISTS transactions (
    -- Identity (queryable, indexed)
    tx_id UUID PRIMARY KEY,
    job_id VARCHAR(200) NOT NULL,
    chain_id INTEGER NOT NULL,
    idempotency_key VARCHAR(200) NOT NULL,

    -- Payload (immutable after creation)
    signer_address VARCHAR(42) NOT NULL,
    to_address VARCHAR(42) NOT NULL,
    data TEXT,
    value_wei VARCHAR(78) NOT NULL DEFAULT '0',
    min_confirmations INTEGER NOT NULL DEFAULT 1,
    deadline_ts TIMESTAMP,

    -- Status (queryable)
    status VARCHAR(20) NOT NULL DEFAULT 'created'
        CHECK (status IN ('created', 'broadcast', 'confirmed', 'failed')),
    failure_type VARCHAR(50),

    -- Broadcast state (queryable)
    current_tx_hash VARCHAR(66),
    current_nonce BIGINT,
    replacement_count INTEGER NOT NULL DEFAULT 0,

    -- Worker coordination (queryable)
    claim_token VARCHAR(100),
    claimed_at TIMESTAMP,

    -- Confirmation (queryable)
    included_block BIGINT,
    confirmed_at TIMESTAMP,

    -- Audit (queryable)
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,

    -- JSON BLOBS (rarely queried - no indexes)
    gas_params_json TEXT,      -- {"gas_limit": N, "max_fee": N, "priority_fee": N}
    broadcast_info_json TEXT,  -- {"group": str, "endpoints": [...]}
    error_info_json TEXT,      -- ErrorInfo as JSON
    tx_hash_history TEXT       -- Append-only JSON array of TxHashRecord
);

-- Indexes (only on queryable columns)
CREATE INDEX IF NOT EXISTS idx_transactions_status ON transactions(status);
CREATE INDEX IF NOT EXISTS idx_transactions_job_status ON transactions(job_id, status);
CREATE INDEX IF NOT EXISTS idx_transactions_signer ON transactions(chain_id, signer_address);
CREATE INDEX IF NOT EXISTS idx_transactions_tx_hash ON transactions(current_tx_hash) WHERE current_tx_hash IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_transactions_created ON transactions(created_at);

-- Idempotency is scoped to (chain_id, signer_address)
CREATE UNIQUE INDEX IF NOT EXISTS uq_transactions_idempotency_scoped
    ON transactions(chain_id, signer_address, idempotency_key);

-- ============================================================================
-- Record this migration
-- ============================================================================
INSERT INTO schema_migrations (version) VALUES ('008');
brawny/db/ops/attempts.py
DELETED
@@ -1,108 +0,0 @@
"""Transaction attempt operations."""

from __future__ import annotations

from datetime import datetime
from uuid import UUID

from brawny.db.base_new import Database
from brawny.db import queries as Q
from brawny.db import mappers as M
from brawny.model.types import TxAttempt, GasParams


def create_attempt(
    db: Database,
    attempt_id: UUID,
    intent_id: UUID,
    nonce: int,
    tx_hash: str | None,
    gas_params: GasParams,
    status: str,
    broadcast_block: int | None = None,
    broadcast_at: datetime | None = None,
    broadcast_group: str | None = None,
    endpoint_url: str | None = None,
) -> TxAttempt | None:
    """Create a new transaction attempt."""
    row = db.fetch_one(Q.CREATE_ATTEMPT, {
        "attempt_id": str(attempt_id),
        "intent_id": str(intent_id),
        "nonce": nonce,
        "tx_hash": tx_hash,
        "gas_params_json": gas_params.to_json(),
        "status": status,
        "broadcast_block": broadcast_block,
        "broadcast_at": broadcast_at,
        "broadcast_group": broadcast_group,
        "endpoint_url": endpoint_url,
    })
    return M.row_to_attempt(row) if row else None


def get_attempt(db: Database, attempt_id: UUID) -> TxAttempt | None:
    """Get attempt by ID."""
    row = db.fetch_one(Q.GET_ATTEMPT, {"attempt_id": str(attempt_id)})
    return M.row_to_attempt(row) if row else None


def get_attempt_by_tx_hash(db: Database, tx_hash: str) -> TxAttempt | None:
    """Get attempt by transaction hash."""
    row = db.fetch_one(Q.GET_ATTEMPT_BY_TX_HASH, {"tx_hash": tx_hash})
    return M.row_to_attempt(row) if row else None


def get_attempts_for_intent(db: Database, intent_id: UUID) -> list[TxAttempt]:
    """Get all attempts for an intent, ordered by created_at DESC."""
    rows = db.fetch_all(Q.GET_ATTEMPTS_FOR_INTENT, {"intent_id": str(intent_id)})
    return [M.row_to_attempt(row) for row in rows]


def get_latest_attempt_for_intent(db: Database, intent_id: UUID) -> TxAttempt | None:
    """Get the most recent attempt for an intent."""
    row = db.fetch_one(Q.GET_LATEST_ATTEMPT_FOR_INTENT, {"intent_id": str(intent_id)})
    return M.row_to_attempt(row) if row else None


def update_attempt_status(db: Database, attempt_id: UUID, status: str) -> bool:
    """Update attempt status."""
    count = db.execute_rowcount(Q.UPDATE_ATTEMPT_STATUS, {
        "attempt_id": str(attempt_id),
        "status": status,
    })
    return count > 0


def update_attempt_included(
    db: Database, attempt_id: UUID, status: str, included_block: int
) -> bool:
    """Update attempt with inclusion info."""
    count = db.execute_rowcount(Q.UPDATE_ATTEMPT_INCLUDED, {
        "attempt_id": str(attempt_id),
        "status": status,
        "included_block": included_block,
    })
    return count > 0


def update_attempt_error(
    db: Database,
    attempt_id: UUID,
    status: str,
    error_code: str | None,
    error_detail: str | None,
) -> bool:
    """Update attempt with error info."""
    count = db.execute_rowcount(Q.UPDATE_ATTEMPT_ERROR, {
        "attempt_id": str(attempt_id),
        "status": status,
        "error_code": error_code,
        "error_detail": error_detail,
    })
    return count > 0


def get_pending_attempts(db: Database, chain_id: int) -> list[TxAttempt]:
    """Get pending attempts for a chain."""
    rows = db.fetch_all(Q.GET_PENDING_ATTEMPTS, {"chain_id": chain_id})
    return [M.row_to_attempt(row) for row in rows]