brawny 0.1.13__py3-none-any.whl → 0.1.22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- brawny/__init__.py +2 -0
- brawny/_context.py +5 -5
- brawny/_rpc/__init__.py +36 -12
- brawny/_rpc/broadcast.py +14 -13
- brawny/_rpc/caller.py +243 -0
- brawny/_rpc/client.py +539 -0
- brawny/_rpc/clients.py +11 -11
- brawny/_rpc/context.py +23 -0
- brawny/_rpc/errors.py +465 -31
- brawny/_rpc/gas.py +7 -6
- brawny/_rpc/pool.py +18 -0
- brawny/_rpc/retry.py +266 -0
- brawny/_rpc/retry_policy.py +81 -0
- brawny/accounts.py +28 -9
- brawny/alerts/__init__.py +15 -18
- brawny/alerts/abi_resolver.py +212 -36
- brawny/alerts/base.py +2 -2
- brawny/alerts/contracts.py +77 -10
- brawny/alerts/errors.py +30 -3
- brawny/alerts/events.py +38 -5
- brawny/alerts/health.py +19 -13
- brawny/alerts/send.py +513 -55
- brawny/api.py +39 -11
- brawny/assets/AGENTS.md +325 -0
- brawny/async_runtime.py +48 -0
- brawny/chain.py +3 -3
- brawny/cli/commands/__init__.py +2 -0
- brawny/cli/commands/console.py +69 -19
- brawny/cli/commands/contract.py +2 -2
- brawny/cli/commands/controls.py +121 -0
- brawny/cli/commands/health.py +2 -2
- brawny/cli/commands/job_dev.py +6 -5
- brawny/cli/commands/jobs.py +99 -2
- brawny/cli/commands/maintenance.py +13 -29
- brawny/cli/commands/migrate.py +1 -0
- brawny/cli/commands/run.py +10 -3
- brawny/cli/commands/script.py +8 -3
- brawny/cli/commands/signer.py +143 -26
- brawny/cli/helpers.py +0 -3
- brawny/cli_templates.py +25 -349
- brawny/config/__init__.py +4 -1
- brawny/config/models.py +43 -57
- brawny/config/parser.py +268 -57
- brawny/config/validation.py +52 -15
- brawny/daemon/context.py +4 -2
- brawny/daemon/core.py +185 -63
- brawny/daemon/loops.py +166 -98
- brawny/daemon/supervisor.py +261 -0
- brawny/db/__init__.py +14 -26
- brawny/db/base.py +248 -151
- brawny/db/global_cache.py +11 -1
- brawny/db/migrate.py +175 -28
- brawny/db/migrations/001_init.sql +4 -3
- brawny/db/migrations/010_add_nonce_gap_index.sql +1 -1
- brawny/db/migrations/011_add_job_logs.sql +1 -2
- brawny/db/migrations/012_add_claimed_by.sql +2 -2
- brawny/db/migrations/013_attempt_unique.sql +10 -0
- brawny/db/migrations/014_add_lease_expires_at.sql +5 -0
- brawny/db/migrations/015_add_signer_alias.sql +14 -0
- brawny/db/migrations/016_runtime_controls_and_quarantine.sql +32 -0
- brawny/db/migrations/017_add_job_drain.sql +6 -0
- brawny/db/migrations/018_add_nonce_reset_audit.sql +20 -0
- brawny/db/migrations/019_add_job_cooldowns.sql +8 -0
- brawny/db/migrations/020_attempt_unique_initial.sql +7 -0
- brawny/db/ops/__init__.py +3 -25
- brawny/db/ops/logs.py +1 -2
- brawny/db/queries.py +47 -91
- brawny/db/serialized.py +65 -0
- brawny/db/sqlite/__init__.py +1001 -0
- brawny/db/sqlite/connection.py +231 -0
- brawny/db/sqlite/execute.py +116 -0
- brawny/db/sqlite/mappers.py +190 -0
- brawny/db/sqlite/repos/attempts.py +372 -0
- brawny/db/sqlite/repos/block_state.py +102 -0
- brawny/db/sqlite/repos/cache.py +104 -0
- brawny/db/sqlite/repos/intents.py +1021 -0
- brawny/db/sqlite/repos/jobs.py +200 -0
- brawny/db/sqlite/repos/maintenance.py +182 -0
- brawny/db/sqlite/repos/signers_nonces.py +566 -0
- brawny/db/sqlite/tx.py +119 -0
- brawny/http.py +194 -0
- brawny/invariants.py +11 -24
- brawny/jobs/base.py +8 -0
- brawny/jobs/job_validation.py +2 -1
- brawny/keystore.py +83 -7
- brawny/lifecycle.py +64 -12
- brawny/logging.py +0 -2
- brawny/metrics.py +84 -12
- brawny/model/contexts.py +111 -9
- brawny/model/enums.py +1 -0
- brawny/model/errors.py +18 -0
- brawny/model/types.py +47 -131
- brawny/network_guard.py +133 -0
- brawny/networks/__init__.py +5 -5
- brawny/networks/config.py +1 -7
- brawny/networks/manager.py +14 -11
- brawny/runtime_controls.py +74 -0
- brawny/scheduler/poller.py +11 -7
- brawny/scheduler/reorg.py +95 -39
- brawny/scheduler/runner.py +442 -168
- brawny/scheduler/shutdown.py +3 -3
- brawny/script_tx.py +3 -3
- brawny/telegram.py +53 -7
- brawny/testing.py +1 -0
- brawny/timeout.py +38 -0
- brawny/tx/executor.py +922 -308
- brawny/tx/intent.py +54 -16
- brawny/tx/monitor.py +31 -12
- brawny/tx/nonce.py +212 -90
- brawny/tx/replacement.py +69 -18
- brawny/tx/retry_policy.py +24 -0
- brawny/tx/stages/types.py +75 -0
- brawny/types.py +18 -0
- brawny/utils.py +41 -0
- {brawny-0.1.13.dist-info → brawny-0.1.22.dist-info}/METADATA +3 -3
- brawny-0.1.22.dist-info/RECORD +163 -0
- brawny/_rpc/manager.py +0 -982
- brawny/_rpc/selector.py +0 -156
- brawny/db/base_new.py +0 -165
- brawny/db/mappers.py +0 -182
- brawny/db/migrations/008_add_transactions.sql +0 -72
- brawny/db/ops/attempts.py +0 -108
- brawny/db/ops/blocks.py +0 -83
- brawny/db/ops/cache.py +0 -93
- brawny/db/ops/intents.py +0 -296
- brawny/db/ops/jobs.py +0 -110
- brawny/db/ops/nonces.py +0 -322
- brawny/db/postgres.py +0 -2535
- brawny/db/postgres_new.py +0 -196
- brawny/db/sqlite.py +0 -2733
- brawny/db/sqlite_new.py +0 -191
- brawny-0.1.13.dist-info/RECORD +0 -141
- {brawny-0.1.13.dist-info → brawny-0.1.22.dist-info}/WHEEL +0 -0
- {brawny-0.1.13.dist-info → brawny-0.1.22.dist-info}/entry_points.txt +0 -0
- {brawny-0.1.13.dist-info → brawny-0.1.22.dist-info}/top_level.txt +0 -0
brawny/db/postgres.py
DELETED
|
@@ -1,2535 +0,0 @@
|
|
|
1
|
-
"""PostgreSQL database implementation for brawny.
|
|
2
|
-
|
|
3
|
-
PostgreSQL is the production database. Features:
|
|
4
|
-
- Connection pooling
|
|
5
|
-
- SERIALIZABLE isolation for nonce reservation
|
|
6
|
-
- FOR UPDATE SKIP LOCKED for intent claiming
|
|
7
|
-
- Proper transaction management
|
|
8
|
-
"""
|
|
9
|
-
|
|
10
|
-
from __future__ import annotations
|
|
11
|
-
|
|
12
|
-
import json
|
|
13
|
-
import random
|
|
14
|
-
import threading
|
|
15
|
-
import time
|
|
16
|
-
from contextlib import contextmanager
|
|
17
|
-
from datetime import datetime, timezone
|
|
18
|
-
from typing import Any, Iterator
|
|
19
|
-
from uuid import UUID
|
|
20
|
-
|
|
21
|
-
import psycopg
|
|
22
|
-
from psycopg.rows import dict_row
|
|
23
|
-
from psycopg_pool import ConnectionPool
|
|
24
|
-
|
|
25
|
-
from brawny.db.base import (
|
|
26
|
-
ABICacheEntry,
|
|
27
|
-
BlockState,
|
|
28
|
-
Database,
|
|
29
|
-
IsolationLevel,
|
|
30
|
-
ProxyCacheEntry,
|
|
31
|
-
)
|
|
32
|
-
from brawny.db.circuit_breaker import DatabaseCircuitBreaker
|
|
33
|
-
from brawny.logging import get_logger
|
|
34
|
-
from brawny.model.enums import AttemptStatus, IntentStatus, NonceStatus, TxStatus
|
|
35
|
-
from brawny.model.errors import DatabaseError, ErrorInfo, FailureType
|
|
36
|
-
from brawny.model.types import (
|
|
37
|
-
BroadcastInfo,
|
|
38
|
-
GasParams,
|
|
39
|
-
JobConfig,
|
|
40
|
-
NonceReservation,
|
|
41
|
-
SignerState,
|
|
42
|
-
Transaction,
|
|
43
|
-
TxAttempt,
|
|
44
|
-
TxHashRecord,
|
|
45
|
-
TxIntent,
|
|
46
|
-
)
|
|
47
|
-
|
|
48
|
-
logger = get_logger(__name__)
|
|
49
|
-
|
|
50
|
-
# Constants for serialization retry
|
|
51
|
-
SERIALIZATION_FAILURE_SQLSTATE = "40001"
|
|
52
|
-
MAX_SERIALIZATION_RETRIES = 5
|
|
53
|
-
BASE_RETRY_DELAY_MS = 10.0
|
|
54
|
-
MAX_RETRY_DELAY_MS = 500.0
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
def _is_serialization_failure(e: Exception) -> bool:
|
|
58
|
-
"""Check if exception is a PostgreSQL serialization failure.
|
|
59
|
-
|
|
60
|
-
Walks the exception cause chain to find SQLSTATE 40001.
|
|
61
|
-
"""
|
|
62
|
-
for _ in range(3): # Max depth to prevent infinite loops
|
|
63
|
-
if hasattr(e, "pgcode") and e.pgcode == SERIALIZATION_FAILURE_SQLSTATE:
|
|
64
|
-
return True
|
|
65
|
-
if hasattr(e, "sqlstate") and e.sqlstate == SERIALIZATION_FAILURE_SQLSTATE:
|
|
66
|
-
return True
|
|
67
|
-
if hasattr(e, "__cause__") and e.__cause__ is not None:
|
|
68
|
-
e = e.__cause__
|
|
69
|
-
else:
|
|
70
|
-
break
|
|
71
|
-
return False
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
def _convert_named_params(query: str, params: dict[str, Any]) -> tuple[str, tuple[Any, ...]]:
|
|
75
|
-
"""Convert :name placeholder query to %s positional params for psycopg.
|
|
76
|
-
|
|
77
|
-
Args:
|
|
78
|
-
query: SQL query with :name placeholders
|
|
79
|
-
params: Dict of parameter values
|
|
80
|
-
|
|
81
|
-
Returns:
|
|
82
|
-
Tuple of (converted_query, positional_params)
|
|
83
|
-
"""
|
|
84
|
-
import re
|
|
85
|
-
# Find all :name placeholders (not ::type casts)
|
|
86
|
-
# (?<!:) ensures we don't match the second colon in ::
|
|
87
|
-
pattern = r'(?<!:):([a-zA-Z_][a-zA-Z0-9_]*)(?![a-zA-Z0-9_:])'
|
|
88
|
-
matches = list(re.finditer(pattern, query))
|
|
89
|
-
|
|
90
|
-
if not matches:
|
|
91
|
-
return query, ()
|
|
92
|
-
|
|
93
|
-
# Build positional params in order of appearance
|
|
94
|
-
positional_params = []
|
|
95
|
-
converted_query = query
|
|
96
|
-
# Process in reverse to maintain string indices
|
|
97
|
-
for match in reversed(matches):
|
|
98
|
-
param_name = match.group(1)
|
|
99
|
-
if param_name not in params:
|
|
100
|
-
raise DatabaseError(f"Missing parameter: {param_name}")
|
|
101
|
-
positional_params.insert(0, params[param_name])
|
|
102
|
-
converted_query = converted_query[:match.start()] + "%s" + converted_query[match.end():]
|
|
103
|
-
|
|
104
|
-
return converted_query, tuple(positional_params)
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
class PostgresDatabase(Database):
|
|
108
|
-
"""PostgreSQL implementation of the Database interface.
|
|
109
|
-
|
|
110
|
-
Uses psycopg with a synchronous connection pool to avoid
|
|
111
|
-
event-loop conflicts across threads.
|
|
112
|
-
"""
|
|
113
|
-
|
|
114
|
-
def __init__(
|
|
115
|
-
self,
|
|
116
|
-
database_url: str,
|
|
117
|
-
pool_size: int = 5,
|
|
118
|
-
pool_max_overflow: int = 10,
|
|
119
|
-
pool_timeout: float = 30.0,
|
|
120
|
-
circuit_breaker_failures: int = 5,
|
|
121
|
-
circuit_breaker_seconds: int = 30,
|
|
122
|
-
) -> None:
|
|
123
|
-
"""Initialize PostgreSQL database.
|
|
124
|
-
|
|
125
|
-
Args:
|
|
126
|
-
database_url: PostgreSQL connection URL
|
|
127
|
-
pool_size: Minimum pool connections
|
|
128
|
-
pool_max_overflow: Maximum additional connections
|
|
129
|
-
pool_timeout: Connection acquisition timeout
|
|
130
|
-
"""
|
|
131
|
-
self._database_url = database_url
|
|
132
|
-
self._pool_size = pool_size
|
|
133
|
-
self._pool_max_size = pool_size + pool_max_overflow
|
|
134
|
-
self._pool_timeout = pool_timeout
|
|
135
|
-
self._pool: ConnectionPool | None = None
|
|
136
|
-
self._isolation_level: IsolationLevel | None = None
|
|
137
|
-
self._local = threading.local()
|
|
138
|
-
self._circuit_breaker = DatabaseCircuitBreaker(
|
|
139
|
-
failure_threshold=circuit_breaker_failures,
|
|
140
|
-
open_seconds=circuit_breaker_seconds,
|
|
141
|
-
backend="postgres",
|
|
142
|
-
)
|
|
143
|
-
|
|
144
|
-
@property
|
|
145
|
-
def dialect(self) -> str:
|
|
146
|
-
"""Return dialect name for query selection."""
|
|
147
|
-
return "postgres"
|
|
148
|
-
|
|
149
|
-
def connect(self) -> None:
|
|
150
|
-
"""Establish database connection pool."""
|
|
151
|
-
if self._pool is not None:
|
|
152
|
-
return
|
|
153
|
-
self._pool = ConnectionPool(
|
|
154
|
-
self._database_url,
|
|
155
|
-
min_size=self._pool_size,
|
|
156
|
-
max_size=self._pool_max_size,
|
|
157
|
-
timeout=self._pool_timeout,
|
|
158
|
-
)
|
|
159
|
-
|
|
160
|
-
def close(self) -> None:
|
|
161
|
-
"""Close database connection pool."""
|
|
162
|
-
if self._pool:
|
|
163
|
-
self._pool.close()
|
|
164
|
-
self._pool = None
|
|
165
|
-
|
|
166
|
-
def is_connected(self) -> bool:
|
|
167
|
-
"""Check if database is connected."""
|
|
168
|
-
return self._pool is not None
|
|
169
|
-
|
|
170
|
-
def _ensure_pool(self) -> ConnectionPool:
|
|
171
|
-
"""Ensure pool exists and return it."""
|
|
172
|
-
if self._pool is None:
|
|
173
|
-
raise DatabaseError("Database not connected. Call connect() first.")
|
|
174
|
-
return self._pool
|
|
175
|
-
|
|
176
|
-
def _get_current_conn(self) -> psycopg.Connection | None:
|
|
177
|
-
return getattr(self._local, "conn", None)
|
|
178
|
-
|
|
179
|
-
@contextmanager
|
|
180
|
-
def transaction(
|
|
181
|
-
self, isolation_level: IsolationLevel | None = None
|
|
182
|
-
) -> Iterator[None]:
|
|
183
|
-
"""Context manager for database transactions."""
|
|
184
|
-
if self._get_current_conn() is not None:
|
|
185
|
-
raise DatabaseError("Nested transactions are not supported in PostgresDatabase.")
|
|
186
|
-
|
|
187
|
-
pool = self._ensure_pool()
|
|
188
|
-
with pool.connection() as conn:
|
|
189
|
-
conn.row_factory = dict_row
|
|
190
|
-
with conn.transaction():
|
|
191
|
-
if isolation_level:
|
|
192
|
-
conn.execute(f"SET TRANSACTION ISOLATION LEVEL {isolation_level}")
|
|
193
|
-
self._local.conn = conn
|
|
194
|
-
try:
|
|
195
|
-
yield
|
|
196
|
-
finally:
|
|
197
|
-
self._local.conn = None
|
|
198
|
-
|
|
199
|
-
def _execute(
|
|
200
|
-
self,
|
|
201
|
-
query: str,
|
|
202
|
-
params: tuple[Any, ...] | None = None,
|
|
203
|
-
) -> None:
|
|
204
|
-
"""Execute a query without returning results."""
|
|
205
|
-
self._circuit_breaker.before_call()
|
|
206
|
-
conn = self._get_current_conn()
|
|
207
|
-
try:
|
|
208
|
-
if conn is not None:
|
|
209
|
-
conn.execute(query, params or ())
|
|
210
|
-
self._circuit_breaker.record_success()
|
|
211
|
-
return
|
|
212
|
-
|
|
213
|
-
pool = self._ensure_pool()
|
|
214
|
-
with pool.connection() as conn:
|
|
215
|
-
conn.row_factory = dict_row
|
|
216
|
-
with conn.transaction():
|
|
217
|
-
if self._isolation_level:
|
|
218
|
-
conn.execute(f"SET TRANSACTION ISOLATION LEVEL {self._isolation_level}")
|
|
219
|
-
conn.execute(query, params or ())
|
|
220
|
-
self._circuit_breaker.record_success()
|
|
221
|
-
except psycopg.Error as e:
|
|
222
|
-
self._circuit_breaker.record_failure(e)
|
|
223
|
-
raise DatabaseError(f"Postgres query failed: {e}") from e
|
|
224
|
-
|
|
225
|
-
def _fetch_all(
|
|
226
|
-
self,
|
|
227
|
-
query: str,
|
|
228
|
-
params: tuple[Any, ...] | None = None,
|
|
229
|
-
) -> list[dict[str, Any]]:
|
|
230
|
-
"""Execute a query and return all results."""
|
|
231
|
-
self._circuit_breaker.before_call()
|
|
232
|
-
conn = self._get_current_conn()
|
|
233
|
-
try:
|
|
234
|
-
if conn is not None:
|
|
235
|
-
result = conn.execute(query, params or ()).fetchall()
|
|
236
|
-
self._circuit_breaker.record_success()
|
|
237
|
-
return result
|
|
238
|
-
|
|
239
|
-
pool = self._ensure_pool()
|
|
240
|
-
with pool.connection() as conn:
|
|
241
|
-
conn.row_factory = dict_row
|
|
242
|
-
with conn.transaction():
|
|
243
|
-
if self._isolation_level:
|
|
244
|
-
conn.execute(f"SET TRANSACTION ISOLATION LEVEL {self._isolation_level}")
|
|
245
|
-
result = conn.execute(query, params or ()).fetchall()
|
|
246
|
-
self._circuit_breaker.record_success()
|
|
247
|
-
return result
|
|
248
|
-
except psycopg.Error as e:
|
|
249
|
-
self._circuit_breaker.record_failure(e)
|
|
250
|
-
raise DatabaseError(f"Postgres query failed: {e}") from e
|
|
251
|
-
|
|
252
|
-
def _fetch_one(
|
|
253
|
-
self,
|
|
254
|
-
query: str,
|
|
255
|
-
params: tuple[Any, ...] | None = None,
|
|
256
|
-
) -> dict[str, Any] | None:
|
|
257
|
-
"""Execute a query and return a single result."""
|
|
258
|
-
self._circuit_breaker.before_call()
|
|
259
|
-
conn = self._get_current_conn()
|
|
260
|
-
try:
|
|
261
|
-
if conn is not None:
|
|
262
|
-
result = conn.execute(query, params or ()).fetchone()
|
|
263
|
-
self._circuit_breaker.record_success()
|
|
264
|
-
return result
|
|
265
|
-
|
|
266
|
-
pool = self._ensure_pool()
|
|
267
|
-
with pool.connection() as conn:
|
|
268
|
-
conn.row_factory = dict_row
|
|
269
|
-
with conn.transaction():
|
|
270
|
-
if self._isolation_level:
|
|
271
|
-
conn.execute(f"SET TRANSACTION ISOLATION LEVEL {self._isolation_level}")
|
|
272
|
-
result = conn.execute(query, params or ()).fetchone()
|
|
273
|
-
self._circuit_breaker.record_success()
|
|
274
|
-
return result
|
|
275
|
-
except psycopg.Error as e:
|
|
276
|
-
self._circuit_breaker.record_failure(e)
|
|
277
|
-
raise DatabaseError(f"Postgres query failed: {e}") from e
|
|
278
|
-
|
|
279
|
-
def execute(
|
|
280
|
-
self,
|
|
281
|
-
query: str,
|
|
282
|
-
params: tuple[Any, ...] | dict[str, Any] | None = None,
|
|
283
|
-
) -> None:
|
|
284
|
-
"""Execute a query without returning results."""
|
|
285
|
-
if isinstance(params, dict):
|
|
286
|
-
query, params = _convert_named_params(query, params)
|
|
287
|
-
self._execute(query, params)
|
|
288
|
-
|
|
289
|
-
def execute_returning(
|
|
290
|
-
self,
|
|
291
|
-
query: str,
|
|
292
|
-
params: tuple[Any, ...] | dict[str, Any] | None = None,
|
|
293
|
-
) -> list[dict[str, Any]]:
|
|
294
|
-
"""Execute a query and return all results as dicts."""
|
|
295
|
-
if isinstance(params, dict):
|
|
296
|
-
query, params = _convert_named_params(query, params)
|
|
297
|
-
return self._fetch_all(query, params)
|
|
298
|
-
|
|
299
|
-
def execute_one(
|
|
300
|
-
self,
|
|
301
|
-
query: str,
|
|
302
|
-
params: tuple[Any, ...] | dict[str, Any] | None = None,
|
|
303
|
-
) -> dict[str, Any] | None:
|
|
304
|
-
"""Execute a query and return a single result or None."""
|
|
305
|
-
if isinstance(params, dict):
|
|
306
|
-
query, params = _convert_named_params(query, params)
|
|
307
|
-
return self._fetch_one(query, params)
|
|
308
|
-
|
|
309
|
-
def execute_returning_rowcount(
|
|
310
|
-
self,
|
|
311
|
-
query: str,
|
|
312
|
-
params: tuple[Any, ...] | dict[str, Any] | None = None,
|
|
313
|
-
) -> int:
|
|
314
|
-
"""Execute a query and return the number of affected rows."""
|
|
315
|
-
if isinstance(params, dict):
|
|
316
|
-
query, params = _convert_named_params(query, params)
|
|
317
|
-
|
|
318
|
-
self._circuit_breaker.before_call()
|
|
319
|
-
conn = self._get_current_conn()
|
|
320
|
-
try:
|
|
321
|
-
if conn is not None:
|
|
322
|
-
cursor = conn.execute(query, params or ())
|
|
323
|
-
self._circuit_breaker.record_success()
|
|
324
|
-
return cursor.rowcount
|
|
325
|
-
|
|
326
|
-
pool = self._ensure_pool()
|
|
327
|
-
with pool.connection() as conn:
|
|
328
|
-
conn.row_factory = dict_row
|
|
329
|
-
with conn.transaction():
|
|
330
|
-
if self._isolation_level:
|
|
331
|
-
conn.execute(f"SET TRANSACTION ISOLATION LEVEL {self._isolation_level}")
|
|
332
|
-
cursor = conn.execute(query, params or ())
|
|
333
|
-
rowcount = cursor.rowcount
|
|
334
|
-
self._circuit_breaker.record_success()
|
|
335
|
-
return rowcount
|
|
336
|
-
except psycopg.Error as e:
|
|
337
|
-
self._circuit_breaker.record_failure(e)
|
|
338
|
-
raise DatabaseError(f"Postgres query failed: {e}") from e
|
|
339
|
-
|
|
340
|
-
# =========================================================================
|
|
341
|
-
# Block State Operations
|
|
342
|
-
# =========================================================================
|
|
343
|
-
|
|
344
|
-
def get_block_state(self, chain_id: int) -> BlockState | None:
|
|
345
|
-
row = self.execute_one(
|
|
346
|
-
"SELECT * FROM block_state WHERE chain_id = %s",
|
|
347
|
-
(chain_id,),
|
|
348
|
-
)
|
|
349
|
-
if not row:
|
|
350
|
-
return None
|
|
351
|
-
return BlockState(
|
|
352
|
-
chain_id=row["chain_id"],
|
|
353
|
-
last_processed_block_number=row["last_processed_block_number"],
|
|
354
|
-
last_processed_block_hash=row["last_processed_block_hash"],
|
|
355
|
-
created_at=row["created_at"],
|
|
356
|
-
updated_at=row["updated_at"],
|
|
357
|
-
)
|
|
358
|
-
|
|
359
|
-
def upsert_block_state(
|
|
360
|
-
self,
|
|
361
|
-
chain_id: int,
|
|
362
|
-
block_number: int,
|
|
363
|
-
block_hash: str,
|
|
364
|
-
) -> None:
|
|
365
|
-
self.execute(
|
|
366
|
-
"""
|
|
367
|
-
INSERT INTO block_state (chain_id, last_processed_block_number, last_processed_block_hash)
|
|
368
|
-
VALUES (%s, %s, %s)
|
|
369
|
-
ON CONFLICT(chain_id) DO UPDATE SET
|
|
370
|
-
last_processed_block_number = EXCLUDED.last_processed_block_number,
|
|
371
|
-
last_processed_block_hash = EXCLUDED.last_processed_block_hash,
|
|
372
|
-
updated_at = NOW()
|
|
373
|
-
""",
|
|
374
|
-
(chain_id, block_number, block_hash),
|
|
375
|
-
)
|
|
376
|
-
|
|
377
|
-
def get_block_hash_at_height(
|
|
378
|
-
self, chain_id: int, block_number: int
|
|
379
|
-
) -> str | None:
|
|
380
|
-
row = self.execute_one(
|
|
381
|
-
"SELECT block_hash FROM block_hash_history WHERE chain_id = %s AND block_number = %s",
|
|
382
|
-
(chain_id, block_number),
|
|
383
|
-
)
|
|
384
|
-
return row["block_hash"] if row else None
|
|
385
|
-
|
|
386
|
-
def insert_block_hash(
|
|
387
|
-
self, chain_id: int, block_number: int, block_hash: str
|
|
388
|
-
) -> None:
|
|
389
|
-
self.execute(
|
|
390
|
-
"""
|
|
391
|
-
INSERT INTO block_hash_history (chain_id, block_number, block_hash)
|
|
392
|
-
VALUES (%s, %s, %s)
|
|
393
|
-
ON CONFLICT(chain_id, block_number) DO UPDATE SET
|
|
394
|
-
block_hash = EXCLUDED.block_hash,
|
|
395
|
-
inserted_at = NOW()
|
|
396
|
-
""",
|
|
397
|
-
(chain_id, block_number, block_hash),
|
|
398
|
-
)
|
|
399
|
-
|
|
400
|
-
def delete_block_hashes_above(self, chain_id: int, block_number: int) -> int:
|
|
401
|
-
result = self.execute_returning(
|
|
402
|
-
"""
|
|
403
|
-
DELETE FROM block_hash_history
|
|
404
|
-
WHERE chain_id = %s AND block_number > %s
|
|
405
|
-
RETURNING id
|
|
406
|
-
""",
|
|
407
|
-
(chain_id, block_number),
|
|
408
|
-
)
|
|
409
|
-
return len(result)
|
|
410
|
-
|
|
411
|
-
def delete_block_hash_at_height(self, chain_id: int, block_number: int) -> bool:
|
|
412
|
-
result = self.execute_returning(
|
|
413
|
-
"""
|
|
414
|
-
DELETE FROM block_hash_history
|
|
415
|
-
WHERE chain_id = %s AND block_number = %s
|
|
416
|
-
RETURNING id
|
|
417
|
-
""",
|
|
418
|
-
(chain_id, block_number),
|
|
419
|
-
)
|
|
420
|
-
return len(result) > 0
|
|
421
|
-
|
|
422
|
-
def cleanup_old_block_hashes(self, chain_id: int, keep_count: int) -> int:
|
|
423
|
-
result = self.execute_returning(
|
|
424
|
-
"""
|
|
425
|
-
DELETE FROM block_hash_history
|
|
426
|
-
WHERE chain_id = %s AND block_number < (
|
|
427
|
-
SELECT MAX(block_number) - %s + 1 FROM block_hash_history WHERE chain_id = %s
|
|
428
|
-
)
|
|
429
|
-
RETURNING id
|
|
430
|
-
""",
|
|
431
|
-
(chain_id, keep_count),
|
|
432
|
-
)
|
|
433
|
-
return len(result)
|
|
434
|
-
|
|
435
|
-
def get_oldest_block_in_history(self, chain_id: int) -> int | None:
|
|
436
|
-
row = self.execute_one(
|
|
437
|
-
"SELECT MIN(block_number) as min_block FROM block_hash_history WHERE chain_id = %s",
|
|
438
|
-
(chain_id,),
|
|
439
|
-
)
|
|
440
|
-
return row["min_block"] if row else None
|
|
441
|
-
|
|
442
|
-
def get_latest_block_in_history(self, chain_id: int) -> int | None:
|
|
443
|
-
row = self.execute_one(
|
|
444
|
-
"SELECT MAX(block_number) as max_block FROM block_hash_history WHERE chain_id = %s",
|
|
445
|
-
(chain_id,),
|
|
446
|
-
)
|
|
447
|
-
return row["max_block"] if row else None
|
|
448
|
-
|
|
449
|
-
def get_inflight_intent_count(
|
|
450
|
-
self, chain_id: int, job_id: str, signer_address: str
|
|
451
|
-
) -> int:
|
|
452
|
-
row = self.execute_one(
|
|
453
|
-
"""
|
|
454
|
-
SELECT COUNT(*) as count
|
|
455
|
-
FROM tx_intents
|
|
456
|
-
WHERE chain_id = %s
|
|
457
|
-
AND job_id = %s
|
|
458
|
-
AND signer_address = %s
|
|
459
|
-
AND status IN ('created', 'claimed', 'sending', 'pending')
|
|
460
|
-
""",
|
|
461
|
-
(chain_id, job_id, signer_address.lower()),
|
|
462
|
-
)
|
|
463
|
-
return int(row["count"]) if row else 0
|
|
464
|
-
|
|
465
|
-
def get_inflight_intents_for_scope(
|
|
466
|
-
self,
|
|
467
|
-
chain_id: int,
|
|
468
|
-
job_id: str,
|
|
469
|
-
signer_address: str,
|
|
470
|
-
to_address: str,
|
|
471
|
-
) -> list[dict[str, Any]]:
|
|
472
|
-
rows = self.execute_returning(
|
|
473
|
-
"""
|
|
474
|
-
SELECT intent_id, status, claimed_at, created_at
|
|
475
|
-
FROM tx_intents
|
|
476
|
-
WHERE chain_id = %s
|
|
477
|
-
AND job_id = %s
|
|
478
|
-
AND signer_address = %s
|
|
479
|
-
AND to_address = %s
|
|
480
|
-
AND status IN ('created', 'claimed', 'sending', 'pending')
|
|
481
|
-
ORDER BY created_at ASC
|
|
482
|
-
""",
|
|
483
|
-
(chain_id, job_id, signer_address.lower(), to_address),
|
|
484
|
-
)
|
|
485
|
-
return rows
|
|
486
|
-
|
|
487
|
-
# =========================================================================
|
|
488
|
-
# Job Operations
|
|
489
|
-
# =========================================================================
|
|
490
|
-
|
|
491
|
-
def get_job(self, job_id: str) -> JobConfig | None:
|
|
492
|
-
row = self.execute_one("SELECT * FROM jobs WHERE job_id = %s", (job_id,))
|
|
493
|
-
if not row:
|
|
494
|
-
return None
|
|
495
|
-
return self._row_to_job_config(row)
|
|
496
|
-
|
|
497
|
-
def get_enabled_jobs(self) -> list[JobConfig]:
|
|
498
|
-
rows = self.execute_returning(
|
|
499
|
-
"SELECT * FROM jobs WHERE enabled = true ORDER BY job_id"
|
|
500
|
-
)
|
|
501
|
-
return [self._row_to_job_config(row) for row in rows]
|
|
502
|
-
|
|
503
|
-
def list_all_jobs(self) -> list[JobConfig]:
|
|
504
|
-
rows = self.execute_returning("SELECT * FROM jobs ORDER BY job_id")
|
|
505
|
-
return [self._row_to_job_config(row) for row in rows]
|
|
506
|
-
|
|
507
|
-
def _row_to_job_config(self, row: dict[str, Any]) -> JobConfig:
|
|
508
|
-
return JobConfig(
|
|
509
|
-
job_id=row["job_id"],
|
|
510
|
-
job_name=row["job_name"],
|
|
511
|
-
enabled=row["enabled"],
|
|
512
|
-
check_interval_blocks=row["check_interval_blocks"],
|
|
513
|
-
last_checked_block_number=row["last_checked_block_number"],
|
|
514
|
-
last_triggered_block_number=row["last_triggered_block_number"],
|
|
515
|
-
created_at=row["created_at"],
|
|
516
|
-
updated_at=row["updated_at"],
|
|
517
|
-
)
|
|
518
|
-
|
|
519
|
-
def upsert_job(
|
|
520
|
-
self,
|
|
521
|
-
job_id: str,
|
|
522
|
-
job_name: str,
|
|
523
|
-
check_interval_blocks: int,
|
|
524
|
-
enabled: bool = True,
|
|
525
|
-
) -> None:
|
|
526
|
-
self.execute(
|
|
527
|
-
"""
|
|
528
|
-
INSERT INTO jobs (job_id, job_name, check_interval_blocks, enabled)
|
|
529
|
-
VALUES (%s, %s, %s, %s)
|
|
530
|
-
ON CONFLICT(job_id) DO UPDATE SET
|
|
531
|
-
job_name = EXCLUDED.job_name,
|
|
532
|
-
check_interval_blocks = EXCLUDED.check_interval_blocks,
|
|
533
|
-
updated_at = NOW()
|
|
534
|
-
""",
|
|
535
|
-
(job_id, job_name, check_interval_blocks, enabled),
|
|
536
|
-
)
|
|
537
|
-
|
|
538
|
-
def update_job_checked(
|
|
539
|
-
self, job_id: str, block_number: int, triggered: bool = False
|
|
540
|
-
) -> None:
|
|
541
|
-
if triggered:
|
|
542
|
-
self.execute(
|
|
543
|
-
"""
|
|
544
|
-
UPDATE jobs SET
|
|
545
|
-
last_checked_block_number = %s,
|
|
546
|
-
last_triggered_block_number = %s,
|
|
547
|
-
updated_at = NOW()
|
|
548
|
-
WHERE job_id = %s
|
|
549
|
-
""",
|
|
550
|
-
(block_number, job_id),
|
|
551
|
-
)
|
|
552
|
-
else:
|
|
553
|
-
self.execute(
|
|
554
|
-
"""
|
|
555
|
-
UPDATE jobs SET
|
|
556
|
-
last_checked_block_number = %s,
|
|
557
|
-
updated_at = NOW()
|
|
558
|
-
WHERE job_id = %s
|
|
559
|
-
""",
|
|
560
|
-
(block_number, job_id),
|
|
561
|
-
)
|
|
562
|
-
|
|
563
|
-
def set_job_enabled(self, job_id: str, enabled: bool) -> bool:
|
|
564
|
-
result = self.execute_returning(
|
|
565
|
-
"""
|
|
566
|
-
UPDATE jobs SET enabled = %s, updated_at = NOW()
|
|
567
|
-
WHERE job_id = %s
|
|
568
|
-
RETURNING job_id
|
|
569
|
-
""",
|
|
570
|
-
(enabled, job_id),
|
|
571
|
-
)
|
|
572
|
-
return len(result) > 0
|
|
573
|
-
|
|
574
|
-
def delete_job(self, job_id: str) -> bool:
|
|
575
|
-
# Delete job_kv entries first (foreign key)
|
|
576
|
-
self.execute("DELETE FROM job_kv WHERE job_id = %s", (job_id,))
|
|
577
|
-
result = self.execute_returning(
|
|
578
|
-
"DELETE FROM jobs WHERE job_id = %s RETURNING job_id",
|
|
579
|
-
(job_id,),
|
|
580
|
-
)
|
|
581
|
-
return len(result) > 0
|
|
582
|
-
|
|
583
|
-
def get_job_kv(self, job_id: str, key: str) -> Any | None:
|
|
584
|
-
row = self.execute_one(
|
|
585
|
-
"SELECT value_json FROM job_kv WHERE job_id = %s AND key = %s",
|
|
586
|
-
(job_id, key),
|
|
587
|
-
)
|
|
588
|
-
if not row:
|
|
589
|
-
return None
|
|
590
|
-
return json.loads(row["value_json"])
|
|
591
|
-
|
|
592
|
-
def set_job_kv(self, job_id: str, key: str, value: Any) -> None:
|
|
593
|
-
value_json = json.dumps(value)
|
|
594
|
-
self.execute(
|
|
595
|
-
"""
|
|
596
|
-
INSERT INTO job_kv (job_id, key, value_json)
|
|
597
|
-
VALUES (%s, %s, %s)
|
|
598
|
-
ON CONFLICT(job_id, key) DO UPDATE SET
|
|
599
|
-
value_json = EXCLUDED.value_json,
|
|
600
|
-
updated_at = NOW()
|
|
601
|
-
""",
|
|
602
|
-
(job_id, key, value_json),
|
|
603
|
-
)
|
|
604
|
-
|
|
605
|
-
def delete_job_kv(self, job_id: str, key: str) -> bool:
|
|
606
|
-
result = self.execute_returning(
|
|
607
|
-
"DELETE FROM job_kv WHERE job_id = %s AND key = %s RETURNING job_id",
|
|
608
|
-
(job_id, key),
|
|
609
|
-
)
|
|
610
|
-
return len(result) > 0
|
|
611
|
-
|
|
612
|
-
# =========================================================================
|
|
613
|
-
# Signer & Nonce Operations
|
|
614
|
-
# =========================================================================
|
|
615
|
-
|
|
616
|
-
def get_signer_state(self, chain_id: int, address: str) -> SignerState | None:
|
|
617
|
-
row = self.execute_one(
|
|
618
|
-
"SELECT * FROM signers WHERE chain_id = %s AND signer_address = %s",
|
|
619
|
-
(chain_id, address),
|
|
620
|
-
)
|
|
621
|
-
if not row:
|
|
622
|
-
return None
|
|
623
|
-
return self._row_to_signer_state(row)
|
|
624
|
-
|
|
625
|
-
def get_all_signers(self, chain_id: int) -> list[SignerState]:
|
|
626
|
-
rows = self.execute_returning(
|
|
627
|
-
"SELECT * FROM signers WHERE chain_id = %s",
|
|
628
|
-
(chain_id,),
|
|
629
|
-
)
|
|
630
|
-
return [self._row_to_signer_state(row) for row in rows]
|
|
631
|
-
|
|
632
|
-
def _row_to_signer_state(self, row: dict[str, Any]) -> SignerState:
|
|
633
|
-
return SignerState(
|
|
634
|
-
chain_id=row["chain_id"],
|
|
635
|
-
signer_address=row["signer_address"],
|
|
636
|
-
next_nonce=row["next_nonce"],
|
|
637
|
-
last_synced_chain_nonce=row["last_synced_chain_nonce"],
|
|
638
|
-
created_at=row["created_at"],
|
|
639
|
-
updated_at=row["updated_at"],
|
|
640
|
-
gap_started_at=row.get("gap_started_at"),
|
|
641
|
-
alias=row.get("alias"),
|
|
642
|
-
)
|
|
643
|
-
|
|
644
|
-
def upsert_signer(
|
|
645
|
-
self,
|
|
646
|
-
chain_id: int,
|
|
647
|
-
address: str,
|
|
648
|
-
next_nonce: int,
|
|
649
|
-
last_synced_chain_nonce: int | None = None,
|
|
650
|
-
) -> None:
|
|
651
|
-
self.execute(
|
|
652
|
-
"""
|
|
653
|
-
INSERT INTO signers (chain_id, signer_address, next_nonce, last_synced_chain_nonce)
|
|
654
|
-
VALUES (%s, %s, %s, %s)
|
|
655
|
-
ON CONFLICT(chain_id, signer_address) DO UPDATE SET
|
|
656
|
-
next_nonce = EXCLUDED.next_nonce,
|
|
657
|
-
last_synced_chain_nonce = EXCLUDED.last_synced_chain_nonce,
|
|
658
|
-
updated_at = NOW()
|
|
659
|
-
""",
|
|
660
|
-
(chain_id, address, next_nonce, last_synced_chain_nonce),
|
|
661
|
-
)
|
|
662
|
-
|
|
663
|
-
def update_signer_next_nonce(
|
|
664
|
-
self, chain_id: int, address: str, next_nonce: int
|
|
665
|
-
) -> None:
|
|
666
|
-
self.execute(
|
|
667
|
-
"""
|
|
668
|
-
UPDATE signers SET next_nonce = %s, updated_at = NOW()
|
|
669
|
-
WHERE chain_id = %s AND signer_address = %s
|
|
670
|
-
""",
|
|
671
|
-
(next_nonce, chain_id, address),
|
|
672
|
-
)
|
|
673
|
-
|
|
674
|
-
def update_signer_chain_nonce(
|
|
675
|
-
self, chain_id: int, address: str, chain_nonce: int
|
|
676
|
-
) -> None:
|
|
677
|
-
self.execute(
|
|
678
|
-
"""
|
|
679
|
-
UPDATE signers SET last_synced_chain_nonce = %s, updated_at = NOW()
|
|
680
|
-
WHERE chain_id = %s AND signer_address = %s
|
|
681
|
-
""",
|
|
682
|
-
(chain_nonce, chain_id, address),
|
|
683
|
-
)
|
|
684
|
-
|
|
685
|
-
def set_gap_started_at(
|
|
686
|
-
self, chain_id: int, address: str, started_at: datetime
|
|
687
|
-
) -> None:
|
|
688
|
-
"""Record when gap blocking started for a signer."""
|
|
689
|
-
self.execute(
|
|
690
|
-
"""
|
|
691
|
-
UPDATE signers SET gap_started_at = %s, updated_at = NOW()
|
|
692
|
-
WHERE chain_id = %s AND signer_address = %s
|
|
693
|
-
""",
|
|
694
|
-
(started_at, chain_id, address),
|
|
695
|
-
)
|
|
696
|
-
|
|
697
|
-
def clear_gap_started_at(self, chain_id: int, address: str) -> None:
|
|
698
|
-
"""Clear gap tracking (gap resolved or force reset)."""
|
|
699
|
-
self.execute(
|
|
700
|
-
"""
|
|
701
|
-
UPDATE signers SET gap_started_at = NULL, updated_at = NOW()
|
|
702
|
-
WHERE chain_id = %s AND signer_address = %s
|
|
703
|
-
""",
|
|
704
|
-
(chain_id, address),
|
|
705
|
-
)
|
|
706
|
-
|
|
707
|
-
def get_signer_by_alias(self, chain_id: int, alias: str) -> SignerState | None:
|
|
708
|
-
"""Get signer by alias. Returns None if not found."""
|
|
709
|
-
row = self.execute_one(
|
|
710
|
-
"""
|
|
711
|
-
SELECT * FROM signers
|
|
712
|
-
WHERE chain_id = %s AND alias = %s
|
|
713
|
-
""",
|
|
714
|
-
(chain_id, alias),
|
|
715
|
-
)
|
|
716
|
-
if not row:
|
|
717
|
-
return None
|
|
718
|
-
return self._row_to_signer_state(row)
|
|
719
|
-
|
|
720
|
-
def reserve_nonce_atomic(
|
|
721
|
-
self,
|
|
722
|
-
chain_id: int,
|
|
723
|
-
address: str,
|
|
724
|
-
chain_nonce: int | None,
|
|
725
|
-
intent_id: UUID | None = None,
|
|
726
|
-
) -> int:
|
|
727
|
-
"""Reserve a nonce atomically with serialization conflict retry.
|
|
728
|
-
|
|
729
|
-
Uses SERIALIZABLE isolation to prevent race conditions. Retries
|
|
730
|
-
automatically on PostgreSQL serialization failures (40001).
|
|
731
|
-
|
|
732
|
-
The inner transaction uses context manager which guarantees
|
|
733
|
-
rollback on exception, making retries safe.
|
|
734
|
-
|
|
735
|
-
Args:
|
|
736
|
-
chain_id: The chain ID
|
|
737
|
-
address: Signer address (lowercase)
|
|
738
|
-
chain_nonce: Current on-chain nonce (from eth_getTransactionCount)
|
|
739
|
-
intent_id: Optional intent ID to associate with reservation
|
|
740
|
-
|
|
741
|
-
Returns:
|
|
742
|
-
The reserved nonce
|
|
743
|
-
|
|
744
|
-
Raises:
|
|
745
|
-
DatabaseError: If reservation fails after all retries
|
|
746
|
-
"""
|
|
747
|
-
from brawny.metrics import get_metrics
|
|
748
|
-
|
|
749
|
-
last_error: Exception | None = None
|
|
750
|
-
|
|
751
|
-
for attempt in range(MAX_SERIALIZATION_RETRIES + 1):
|
|
752
|
-
try:
|
|
753
|
-
return self._reserve_nonce_atomic_inner(
|
|
754
|
-
chain_id, address, chain_nonce, intent_id
|
|
755
|
-
)
|
|
756
|
-
except Exception as e:
|
|
757
|
-
if not _is_serialization_failure(e):
|
|
758
|
-
raise
|
|
759
|
-
|
|
760
|
-
last_error = e
|
|
761
|
-
get_metrics().counter("brawny_nonce_serialization_retries_total").inc()
|
|
762
|
-
|
|
763
|
-
if attempt < MAX_SERIALIZATION_RETRIES:
|
|
764
|
-
# Exponential backoff with jitter
|
|
765
|
-
delay_ms = min(BASE_RETRY_DELAY_MS * (2 ** attempt), MAX_RETRY_DELAY_MS)
|
|
766
|
-
jitter = random.uniform(0, delay_ms * 0.3)
|
|
767
|
-
time.sleep((delay_ms + jitter) / 1000.0)
|
|
768
|
-
|
|
769
|
-
logger.debug(
|
|
770
|
-
"nonce.serialization_retry",
|
|
771
|
-
chain_id=chain_id,
|
|
772
|
-
address=address,
|
|
773
|
-
attempt=attempt + 1,
|
|
774
|
-
delay_ms=delay_ms,
|
|
775
|
-
)
|
|
776
|
-
|
|
777
|
-
# Exhausted retries
|
|
778
|
-
logger.error(
|
|
779
|
-
"nonce.serialization_retries_exhausted",
|
|
780
|
-
chain_id=chain_id,
|
|
781
|
-
address=address,
|
|
782
|
-
max_retries=MAX_SERIALIZATION_RETRIES,
|
|
783
|
-
exc_info=True,
|
|
784
|
-
)
|
|
785
|
-
raise last_error # type: ignore[misc]
|
|
786
|
-
|
|
787
|
-
def _reserve_nonce_atomic_inner(
|
|
788
|
-
self,
|
|
789
|
-
chain_id: int,
|
|
790
|
-
address: str,
|
|
791
|
-
chain_nonce: int | None,
|
|
792
|
-
intent_id: UUID | None,
|
|
793
|
-
) -> int:
|
|
794
|
-
"""Inner implementation - single attempt.
|
|
795
|
-
|
|
796
|
-
Uses context manager for transaction which guarantees
|
|
797
|
-
rollback on exception, leaving connection in clean state for retry.
|
|
798
|
-
"""
|
|
799
|
-
self._circuit_breaker.before_call()
|
|
800
|
-
pool = self._ensure_pool()
|
|
801
|
-
|
|
802
|
-
try:
|
|
803
|
-
with pool.connection() as conn:
|
|
804
|
-
conn.row_factory = dict_row
|
|
805
|
-
with conn.transaction():
|
|
806
|
-
# Use SERIALIZABLE isolation for atomic nonce reservation
|
|
807
|
-
conn.execute("SET TRANSACTION ISOLATION LEVEL SERIALIZABLE")
|
|
808
|
-
|
|
809
|
-
# Ensure signer row exists
|
|
810
|
-
conn.execute(
|
|
811
|
-
"""
|
|
812
|
-
INSERT INTO signers (chain_id, signer_address, next_nonce, last_synced_chain_nonce)
|
|
813
|
-
VALUES (%s, %s, 0, NULL)
|
|
814
|
-
ON CONFLICT(chain_id, signer_address) DO NOTHING
|
|
815
|
-
""",
|
|
816
|
-
(chain_id, address),
|
|
817
|
-
)
|
|
818
|
-
|
|
819
|
-
# Lock the signer row
|
|
820
|
-
row = conn.execute(
|
|
821
|
-
"""
|
|
822
|
-
SELECT * FROM signers
|
|
823
|
-
WHERE chain_id = %s AND signer_address = %s
|
|
824
|
-
FOR UPDATE
|
|
825
|
-
""",
|
|
826
|
-
(chain_id, address),
|
|
827
|
-
).fetchone()
|
|
828
|
-
|
|
829
|
-
if row is None:
|
|
830
|
-
raise DatabaseError("Failed to lock signer row")
|
|
831
|
-
|
|
832
|
-
db_next_nonce = row["next_nonce"]
|
|
833
|
-
base_nonce = chain_nonce if chain_nonce is not None else db_next_nonce
|
|
834
|
-
|
|
835
|
-
# Get existing reservations to find gaps
|
|
836
|
-
reservations = conn.execute(
|
|
837
|
-
"""
|
|
838
|
-
SELECT nonce FROM nonce_reservations
|
|
839
|
-
WHERE chain_id = %s AND signer_address = %s
|
|
840
|
-
AND status != 'released'
|
|
841
|
-
AND nonce >= %s
|
|
842
|
-
ORDER BY nonce
|
|
843
|
-
""",
|
|
844
|
-
(chain_id, address, base_nonce),
|
|
845
|
-
).fetchall()
|
|
846
|
-
|
|
847
|
-
# Find next available nonce (skip existing reservations)
|
|
848
|
-
candidate = base_nonce
|
|
849
|
-
for res in reservations:
|
|
850
|
-
if res["nonce"] == candidate:
|
|
851
|
-
candidate += 1
|
|
852
|
-
elif res["nonce"] > candidate:
|
|
853
|
-
break
|
|
854
|
-
|
|
855
|
-
if candidate - base_nonce > 100:
|
|
856
|
-
raise DatabaseError(
|
|
857
|
-
f"Could not find available nonce within 100 slots for signer {address}"
|
|
858
|
-
)
|
|
859
|
-
|
|
860
|
-
# Create the reservation
|
|
861
|
-
conn.execute(
|
|
862
|
-
"""
|
|
863
|
-
INSERT INTO nonce_reservations (chain_id, signer_address, nonce, status, intent_id)
|
|
864
|
-
VALUES (%s, %s, %s, %s, %s)
|
|
865
|
-
ON CONFLICT(chain_id, signer_address, nonce) DO UPDATE SET
|
|
866
|
-
status = EXCLUDED.status,
|
|
867
|
-
intent_id = EXCLUDED.intent_id,
|
|
868
|
-
updated_at = NOW()
|
|
869
|
-
""",
|
|
870
|
-
(chain_id, address, candidate, NonceStatus.RESERVED.value, intent_id),
|
|
871
|
-
)
|
|
872
|
-
|
|
873
|
-
# Update signer's next_nonce
|
|
874
|
-
new_next_nonce = max(db_next_nonce, candidate + 1)
|
|
875
|
-
conn.execute(
|
|
876
|
-
"""
|
|
877
|
-
UPDATE signers SET next_nonce = %s, updated_at = NOW()
|
|
878
|
-
WHERE chain_id = %s AND signer_address = %s
|
|
879
|
-
""",
|
|
880
|
-
(new_next_nonce, chain_id, address),
|
|
881
|
-
)
|
|
882
|
-
|
|
883
|
-
self._circuit_breaker.record_success()
|
|
884
|
-
return candidate
|
|
885
|
-
|
|
886
|
-
except DatabaseError:
|
|
887
|
-
self._circuit_breaker.record_failure(None)
|
|
888
|
-
raise
|
|
889
|
-
except Exception as e:
|
|
890
|
-
self._circuit_breaker.record_failure(e)
|
|
891
|
-
raise DatabaseError(f"Nonce reservation failed: {e}") from e
|
|
892
|
-
|
|
893
|
-
def get_nonce_reservation(
|
|
894
|
-
self, chain_id: int, address: str, nonce: int
|
|
895
|
-
) -> NonceReservation | None:
|
|
896
|
-
row = self.execute_one(
|
|
897
|
-
"""
|
|
898
|
-
SELECT * FROM nonce_reservations
|
|
899
|
-
WHERE chain_id = %s AND signer_address = %s AND nonce = %s
|
|
900
|
-
""",
|
|
901
|
-
(chain_id, address, nonce),
|
|
902
|
-
)
|
|
903
|
-
if not row:
|
|
904
|
-
return None
|
|
905
|
-
return self._row_to_nonce_reservation(row)
|
|
906
|
-
|
|
907
|
-
def get_reservations_for_signer(
|
|
908
|
-
self, chain_id: int, address: str, status: str | None = None
|
|
909
|
-
) -> list[NonceReservation]:
|
|
910
|
-
if status:
|
|
911
|
-
rows = self.execute_returning(
|
|
912
|
-
"""
|
|
913
|
-
SELECT * FROM nonce_reservations
|
|
914
|
-
WHERE chain_id = %s AND signer_address = %s AND status = %s
|
|
915
|
-
ORDER BY nonce
|
|
916
|
-
""",
|
|
917
|
-
(chain_id, address, status),
|
|
918
|
-
)
|
|
919
|
-
else:
|
|
920
|
-
rows = self.execute_returning(
|
|
921
|
-
"""
|
|
922
|
-
SELECT * FROM nonce_reservations
|
|
923
|
-
WHERE chain_id = %s AND signer_address = %s
|
|
924
|
-
ORDER BY nonce
|
|
925
|
-
""",
|
|
926
|
-
(chain_id, address),
|
|
927
|
-
)
|
|
928
|
-
return [self._row_to_nonce_reservation(row) for row in rows]
|
|
929
|
-
|
|
930
|
-
def get_reservations_below_nonce(
|
|
931
|
-
self, chain_id: int, address: str, nonce: int
|
|
932
|
-
) -> list[NonceReservation]:
|
|
933
|
-
rows = self.execute_returning(
|
|
934
|
-
"""
|
|
935
|
-
SELECT * FROM nonce_reservations
|
|
936
|
-
WHERE chain_id = %s AND signer_address = %s AND nonce < %s
|
|
937
|
-
ORDER BY nonce
|
|
938
|
-
""",
|
|
939
|
-
(chain_id, address, nonce),
|
|
940
|
-
)
|
|
941
|
-
return [self._row_to_nonce_reservation(row) for row in rows]
|
|
942
|
-
|
|
943
|
-
def _row_to_nonce_reservation(self, row: dict[str, Any]) -> NonceReservation:
|
|
944
|
-
return NonceReservation(
|
|
945
|
-
id=row["id"],
|
|
946
|
-
chain_id=row["chain_id"],
|
|
947
|
-
signer_address=row["signer_address"],
|
|
948
|
-
nonce=row["nonce"],
|
|
949
|
-
status=NonceStatus(row["status"]),
|
|
950
|
-
intent_id=row["intent_id"],
|
|
951
|
-
created_at=row["created_at"],
|
|
952
|
-
updated_at=row["updated_at"],
|
|
953
|
-
)
|
|
954
|
-
|
|
955
|
-
def create_nonce_reservation(
|
|
956
|
-
self,
|
|
957
|
-
chain_id: int,
|
|
958
|
-
address: str,
|
|
959
|
-
nonce: int,
|
|
960
|
-
status: str = "reserved",
|
|
961
|
-
intent_id: UUID | None = None,
|
|
962
|
-
) -> NonceReservation:
|
|
963
|
-
row = self.execute_one(
|
|
964
|
-
"""
|
|
965
|
-
INSERT INTO nonce_reservations (chain_id, signer_address, nonce, status, intent_id)
|
|
966
|
-
VALUES (%s, %s, %s, %s, %s)
|
|
967
|
-
ON CONFLICT(chain_id, signer_address, nonce) DO UPDATE SET
|
|
968
|
-
status = EXCLUDED.status,
|
|
969
|
-
intent_id = EXCLUDED.intent_id,
|
|
970
|
-
updated_at = NOW()
|
|
971
|
-
RETURNING *
|
|
972
|
-
""",
|
|
973
|
-
(chain_id, address, nonce, status, intent_id),
|
|
974
|
-
)
|
|
975
|
-
if not row:
|
|
976
|
-
raise DatabaseError("Failed to create nonce reservation")
|
|
977
|
-
return self._row_to_nonce_reservation(row)
|
|
978
|
-
|
|
979
|
-
def update_nonce_reservation_status(
|
|
980
|
-
self,
|
|
981
|
-
chain_id: int,
|
|
982
|
-
address: str,
|
|
983
|
-
nonce: int,
|
|
984
|
-
status: str,
|
|
985
|
-
intent_id: UUID | None = None,
|
|
986
|
-
) -> bool:
|
|
987
|
-
if intent_id:
|
|
988
|
-
result = self.execute_returning(
|
|
989
|
-
"""
|
|
990
|
-
UPDATE nonce_reservations SET status = %s, intent_id = %s, updated_at = NOW()
|
|
991
|
-
WHERE chain_id = %s AND signer_address = %s AND nonce = %s
|
|
992
|
-
RETURNING id
|
|
993
|
-
""",
|
|
994
|
-
(status, intent_id, chain_id, address, nonce),
|
|
995
|
-
)
|
|
996
|
-
else:
|
|
997
|
-
result = self.execute_returning(
|
|
998
|
-
"""
|
|
999
|
-
UPDATE nonce_reservations SET status = %s, updated_at = NOW()
|
|
1000
|
-
WHERE chain_id = %s AND signer_address = %s AND nonce = %s
|
|
1001
|
-
RETURNING id
|
|
1002
|
-
""",
|
|
1003
|
-
(status, chain_id, address, nonce),
|
|
1004
|
-
)
|
|
1005
|
-
return len(result) > 0
|
|
1006
|
-
|
|
1007
|
-
def release_nonce_reservation(
|
|
1008
|
-
self, chain_id: int, address: str, nonce: int
|
|
1009
|
-
) -> bool:
|
|
1010
|
-
return self.update_nonce_reservation_status(
|
|
1011
|
-
chain_id, address, nonce, "released"
|
|
1012
|
-
)
|
|
1013
|
-
|
|
1014
|
-
def cleanup_orphaned_nonces(
|
|
1015
|
-
self, chain_id: int, older_than_hours: int = 24
|
|
1016
|
-
) -> int:
|
|
1017
|
-
with self._get_connection() as conn:
|
|
1018
|
-
result = conn.execute(
|
|
1019
|
-
text("""
|
|
1020
|
-
DELETE FROM nonce_reservations
|
|
1021
|
-
WHERE chain_id = :chain_id
|
|
1022
|
-
AND status = 'orphaned'
|
|
1023
|
-
AND updated_at < NOW() - MAKE_INTERVAL(hours => :hours)
|
|
1024
|
-
RETURNING id
|
|
1025
|
-
"""),
|
|
1026
|
-
{"chain_id": chain_id, "hours": older_than_hours},
|
|
1027
|
-
)
|
|
1028
|
-
return len(result.fetchall())
|
|
1029
|
-
|
|
1030
|
-
# =========================================================================
|
|
1031
|
-
# Intent Operations
|
|
1032
|
-
# =========================================================================
|
|
1033
|
-
|
|
1034
|
-
def create_intent(
|
|
1035
|
-
self,
|
|
1036
|
-
intent_id: UUID,
|
|
1037
|
-
job_id: str,
|
|
1038
|
-
chain_id: int,
|
|
1039
|
-
signer_address: str,
|
|
1040
|
-
idempotency_key: str,
|
|
1041
|
-
to_address: str,
|
|
1042
|
-
data: str | None,
|
|
1043
|
-
value_wei: str,
|
|
1044
|
-
gas_limit: int | None,
|
|
1045
|
-
max_fee_per_gas: str | None,
|
|
1046
|
-
max_priority_fee_per_gas: str | None,
|
|
1047
|
-
min_confirmations: int,
|
|
1048
|
-
deadline_ts: datetime | None,
|
|
1049
|
-
broadcast_group: str | None = None,
|
|
1050
|
-
broadcast_endpoints: list[str] | None = None,
|
|
1051
|
-
) -> TxIntent | None:
|
|
1052
|
-
try:
|
|
1053
|
-
signer_address = signer_address.lower()
|
|
1054
|
-
row = self.execute_one(
|
|
1055
|
-
"""
|
|
1056
|
-
INSERT INTO tx_intents (
|
|
1057
|
-
intent_id, job_id, chain_id, signer_address, idempotency_key,
|
|
1058
|
-
to_address, data, value_wei, gas_limit, max_fee_per_gas,
|
|
1059
|
-
max_priority_fee_per_gas, min_confirmations, deadline_ts,
|
|
1060
|
-
broadcast_group, broadcast_endpoints_json, retry_after, status
|
|
1061
|
-
) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, NULL, 'created')
|
|
1062
|
-
ON CONFLICT (chain_id, signer_address, idempotency_key) DO NOTHING
|
|
1063
|
-
RETURNING *
|
|
1064
|
-
""",
|
|
1065
|
-
(
|
|
1066
|
-
intent_id,
|
|
1067
|
-
job_id,
|
|
1068
|
-
chain_id,
|
|
1069
|
-
signer_address,
|
|
1070
|
-
idempotency_key,
|
|
1071
|
-
to_address,
|
|
1072
|
-
data,
|
|
1073
|
-
value_wei,
|
|
1074
|
-
gas_limit,
|
|
1075
|
-
max_fee_per_gas,
|
|
1076
|
-
max_priority_fee_per_gas,
|
|
1077
|
-
min_confirmations,
|
|
1078
|
-
deadline_ts,
|
|
1079
|
-
broadcast_group,
|
|
1080
|
-
json.dumps(broadcast_endpoints) if broadcast_endpoints else None,
|
|
1081
|
-
),
|
|
1082
|
-
)
|
|
1083
|
-
if row:
|
|
1084
|
-
return self._row_to_intent(row)
|
|
1085
|
-
return None
|
|
1086
|
-
except psycopg.Error as e:
|
|
1087
|
-
logger.warning(
|
|
1088
|
-
"db.create_intent_failed",
|
|
1089
|
-
error=str(e),
|
|
1090
|
-
intent_id=str(intent_id),
|
|
1091
|
-
idempotency_key=idempotency_key,
|
|
1092
|
-
job_id=job_id,
|
|
1093
|
-
)
|
|
1094
|
-
return None
|
|
1095
|
-
|
|
1096
|
-
def get_intent(self, intent_id: UUID) -> TxIntent | None:
|
|
1097
|
-
row = self.execute_one(
|
|
1098
|
-
"SELECT * FROM tx_intents WHERE intent_id = %s",
|
|
1099
|
-
(intent_id,),
|
|
1100
|
-
)
|
|
1101
|
-
if not row:
|
|
1102
|
-
return None
|
|
1103
|
-
return self._row_to_intent(row)
|
|
1104
|
-
|
|
1105
|
-
def get_intent_by_idempotency_key(
|
|
1106
|
-
self,
|
|
1107
|
-
chain_id: int,
|
|
1108
|
-
signer_address: str,
|
|
1109
|
-
idempotency_key: str,
|
|
1110
|
-
) -> TxIntent | None:
|
|
1111
|
-
row = self.execute_one(
|
|
1112
|
-
"SELECT * FROM tx_intents WHERE chain_id = %s AND signer_address = %s AND idempotency_key = %s",
|
|
1113
|
-
(chain_id, signer_address.lower(), idempotency_key),
|
|
1114
|
-
)
|
|
1115
|
-
if not row:
|
|
1116
|
-
return None
|
|
1117
|
-
return self._row_to_intent(row)
|
|
1118
|
-
|
|
1119
|
-
def _row_to_intent(self, row: dict[str, Any]) -> TxIntent:
|
|
1120
|
-
return TxIntent(
|
|
1121
|
-
intent_id=row["intent_id"],
|
|
1122
|
-
job_id=row["job_id"],
|
|
1123
|
-
chain_id=row["chain_id"],
|
|
1124
|
-
signer_address=row["signer_address"],
|
|
1125
|
-
idempotency_key=row["idempotency_key"],
|
|
1126
|
-
to_address=row["to_address"],
|
|
1127
|
-
data=row["data"],
|
|
1128
|
-
value_wei=row["value_wei"],
|
|
1129
|
-
gas_limit=row["gas_limit"],
|
|
1130
|
-
max_fee_per_gas=row["max_fee_per_gas"],
|
|
1131
|
-
max_priority_fee_per_gas=row["max_priority_fee_per_gas"],
|
|
1132
|
-
min_confirmations=row["min_confirmations"],
|
|
1133
|
-
deadline_ts=row["deadline_ts"],
|
|
1134
|
-
retry_after=row.get("retry_after"),
|
|
1135
|
-
retry_count=row.get("retry_count", 0),
|
|
1136
|
-
status=IntentStatus(row["status"]),
|
|
1137
|
-
claim_token=row["claim_token"],
|
|
1138
|
-
claimed_at=row["claimed_at"],
|
|
1139
|
-
created_at=row["created_at"],
|
|
1140
|
-
updated_at=row["updated_at"],
|
|
1141
|
-
# Broadcast binding (may be None if not yet broadcast)
|
|
1142
|
-
broadcast_group=row.get("broadcast_group"),
|
|
1143
|
-
broadcast_endpoints_json=row.get("broadcast_endpoints_json"),
|
|
1144
|
-
)
|
|
1145
|
-
|
|
1146
|
-
def get_intents_by_status(
|
|
1147
|
-
self,
|
|
1148
|
-
status: str | list[str],
|
|
1149
|
-
chain_id: int | None = None,
|
|
1150
|
-
job_id: str | None = None,
|
|
1151
|
-
limit: int = 100,
|
|
1152
|
-
) -> list[TxIntent]:
|
|
1153
|
-
if isinstance(status, str):
|
|
1154
|
-
status = [status]
|
|
1155
|
-
|
|
1156
|
-
# Build query dynamically
|
|
1157
|
-
query = "SELECT * FROM tx_intents WHERE status = ANY(%s)"
|
|
1158
|
-
params: list[Any] = [status]
|
|
1159
|
-
param_idx = 2
|
|
1160
|
-
|
|
1161
|
-
if chain_id is not None:
|
|
1162
|
-
query += f" AND chain_id = ${param_idx}"
|
|
1163
|
-
params.append(chain_id)
|
|
1164
|
-
param_idx += 1
|
|
1165
|
-
if job_id is not None:
|
|
1166
|
-
query += f" AND job_id = ${param_idx}"
|
|
1167
|
-
params.append(job_id)
|
|
1168
|
-
param_idx += 1
|
|
1169
|
-
|
|
1170
|
-
query += f" ORDER BY created_at ASC LIMIT ${param_idx}"
|
|
1171
|
-
params.append(limit)
|
|
1172
|
-
|
|
1173
|
-
rows = self.execute_returning(query, tuple(params))
|
|
1174
|
-
return [self._row_to_intent(row) for row in rows]
|
|
1175
|
-
|
|
1176
|
-
def list_intents_filtered(
|
|
1177
|
-
self,
|
|
1178
|
-
status: str | None = None,
|
|
1179
|
-
job_id: str | None = None,
|
|
1180
|
-
limit: int = 50,
|
|
1181
|
-
) -> list[dict[str, Any]]:
|
|
1182
|
-
query = "SELECT * FROM tx_intents WHERE 1=1"
|
|
1183
|
-
params: list[Any] = []
|
|
1184
|
-
|
|
1185
|
-
if status is not None:
|
|
1186
|
-
query += " AND status = %s"
|
|
1187
|
-
params.append(status)
|
|
1188
|
-
if job_id is not None:
|
|
1189
|
-
query += " AND job_id = %s"
|
|
1190
|
-
params.append(job_id)
|
|
1191
|
-
|
|
1192
|
-
query += " ORDER BY created_at DESC LIMIT %s"
|
|
1193
|
-
params.append(limit)
|
|
1194
|
-
|
|
1195
|
-
return self.execute_returning(query, tuple(params))
|
|
1196
|
-
|
|
1197
|
-
def get_active_intent_count(self, job_id: str, chain_id: int | None = None) -> int:
|
|
1198
|
-
params: list[Any] = [
|
|
1199
|
-
[IntentStatus.CREATED.value,
|
|
1200
|
-
IntentStatus.CLAIMED.value,
|
|
1201
|
-
IntentStatus.SENDING.value,
|
|
1202
|
-
IntentStatus.PENDING.value],
|
|
1203
|
-
job_id,
|
|
1204
|
-
]
|
|
1205
|
-
query = "SELECT COUNT(*) AS count FROM tx_intents WHERE status = ANY(%s) AND job_id = %s"
|
|
1206
|
-
if chain_id is not None:
|
|
1207
|
-
query += " AND chain_id = %s"
|
|
1208
|
-
params.append(chain_id)
|
|
1209
|
-
row = self.execute_one(query, tuple(params))
|
|
1210
|
-
return int(row["count"]) if row else 0
|
|
1211
|
-
|
|
1212
|
-
def get_pending_intent_count(self, chain_id: int | None = None) -> int:
|
|
1213
|
-
params: list[Any] = [
|
|
1214
|
-
[IntentStatus.CREATED.value,
|
|
1215
|
-
IntentStatus.CLAIMED.value,
|
|
1216
|
-
IntentStatus.SENDING.value,
|
|
1217
|
-
IntentStatus.PENDING.value],
|
|
1218
|
-
]
|
|
1219
|
-
query = "SELECT COUNT(*) AS count FROM tx_intents WHERE status = ANY(%s)"
|
|
1220
|
-
if chain_id is not None:
|
|
1221
|
-
query += " AND chain_id = %s"
|
|
1222
|
-
params.append(chain_id)
|
|
1223
|
-
row = self.execute_one(query, tuple(params))
|
|
1224
|
-
return int(row["count"]) if row else 0
|
|
1225
|
-
|
|
1226
|
-
def get_backing_off_intent_count(self, chain_id: int | None = None) -> int:
|
|
1227
|
-
query = "SELECT COUNT(*) AS count FROM tx_intents WHERE retry_after > NOW()"
|
|
1228
|
-
params: list[Any] = []
|
|
1229
|
-
if chain_id is not None:
|
|
1230
|
-
query += " AND chain_id = %s"
|
|
1231
|
-
params.append(chain_id)
|
|
1232
|
-
row = self.execute_one(query, tuple(params))
|
|
1233
|
-
return int(row["count"]) if row else 0
|
|
1234
|
-
|
|
1235
|
-
def get_oldest_pending_intent_age(self, chain_id: int) -> float | None:
|
|
1236
|
-
query = """
|
|
1237
|
-
SELECT EXTRACT(EPOCH FROM (NOW() - MIN(created_at)))::float AS age_seconds
|
|
1238
|
-
FROM tx_intents
|
|
1239
|
-
WHERE chain_id = %(chain_id)s
|
|
1240
|
-
AND status IN ('created', 'pending', 'claimed', 'sending')
|
|
1241
|
-
"""
|
|
1242
|
-
result = self.execute_one(query, {"chain_id": chain_id})
|
|
1243
|
-
if result and result.get("age_seconds") is not None:
|
|
1244
|
-
return result["age_seconds"]
|
|
1245
|
-
return None
|
|
1246
|
-
|
|
1247
|
-
def list_intent_inconsistencies(
|
|
1248
|
-
self,
|
|
1249
|
-
max_age_seconds: int,
|
|
1250
|
-
limit: int = 100,
|
|
1251
|
-
chain_id: int | None = None,
|
|
1252
|
-
) -> list[dict[str, Any]]:
|
|
1253
|
-
chain_clause = ""
|
|
1254
|
-
chain_params: list[Any] = []
|
|
1255
|
-
if chain_id is not None:
|
|
1256
|
-
chain_clause = " AND chain_id = %s"
|
|
1257
|
-
chain_params = [chain_id] * 5
|
|
1258
|
-
|
|
1259
|
-
query = f"""
|
|
1260
|
-
SELECT intent_id, status, 'pending_no_attempt' AS reason
|
|
1261
|
-
FROM tx_intents
|
|
1262
|
-
WHERE status = 'pending'
|
|
1263
|
-
{chain_clause}
|
|
1264
|
-
AND NOT EXISTS (
|
|
1265
|
-
SELECT 1 FROM tx_attempts
|
|
1266
|
-
WHERE tx_attempts.intent_id = tx_intents.intent_id
|
|
1267
|
-
AND tx_attempts.tx_hash IS NOT NULL
|
|
1268
|
-
)
|
|
1269
|
-
|
|
1270
|
-
UNION ALL
|
|
1271
|
-
SELECT intent_id, status, 'confirmed_no_confirmed_attempt' AS reason
|
|
1272
|
-
FROM tx_intents
|
|
1273
|
-
WHERE status = 'confirmed'
|
|
1274
|
-
{chain_clause}
|
|
1275
|
-
AND NOT EXISTS (
|
|
1276
|
-
SELECT 1 FROM tx_attempts
|
|
1277
|
-
WHERE tx_attempts.intent_id = tx_intents.intent_id
|
|
1278
|
-
AND tx_attempts.status = 'confirmed'
|
|
1279
|
-
)
|
|
1280
|
-
|
|
1281
|
-
UNION ALL
|
|
1282
|
-
SELECT intent_id, status, 'claimed_missing_claim' AS reason
|
|
1283
|
-
FROM tx_intents
|
|
1284
|
-
WHERE status = 'claimed'
|
|
1285
|
-
{chain_clause}
|
|
1286
|
-
AND (claim_token IS NULL OR claimed_at IS NULL)
|
|
1287
|
-
|
|
1288
|
-
UNION ALL
|
|
1289
|
-
SELECT intent_id, status, 'nonclaimed_with_claim' AS reason
|
|
1290
|
-
FROM tx_intents
|
|
1291
|
-
WHERE status != 'claimed'
|
|
1292
|
-
{chain_clause}
|
|
1293
|
-
AND (claim_token IS NOT NULL OR claimed_at IS NOT NULL)
|
|
1294
|
-
|
|
1295
|
-
UNION ALL
|
|
1296
|
-
SELECT intent_id, status, 'sending_stuck' AS reason
|
|
1297
|
-
FROM tx_intents
|
|
1298
|
-
WHERE status = 'sending'
|
|
1299
|
-
{chain_clause}
|
|
1300
|
-
AND updated_at < NOW() - INTERVAL '1 second' * %s
|
|
1301
|
-
|
|
1302
|
-
LIMIT %s
|
|
1303
|
-
"""
|
|
1304
|
-
params = chain_params + [max_age_seconds, limit]
|
|
1305
|
-
rows = self.execute_returning(query, tuple(params))
|
|
1306
|
-
return [dict(row) for row in rows]
|
|
1307
|
-
|
|
1308
|
-
def list_sending_intents_older_than(
|
|
1309
|
-
self,
|
|
1310
|
-
max_age_seconds: int,
|
|
1311
|
-
limit: int = 100,
|
|
1312
|
-
chain_id: int | None = None,
|
|
1313
|
-
) -> list[TxIntent]:
|
|
1314
|
-
query = """
|
|
1315
|
-
SELECT * FROM tx_intents
|
|
1316
|
-
WHERE status = 'sending'
|
|
1317
|
-
AND updated_at < NOW() - INTERVAL '1 second' * %s
|
|
1318
|
-
"""
|
|
1319
|
-
params: list[Any] = [max_age_seconds]
|
|
1320
|
-
if chain_id is not None:
|
|
1321
|
-
query += " AND chain_id = %s"
|
|
1322
|
-
params.append(chain_id)
|
|
1323
|
-
query += " ORDER BY updated_at ASC LIMIT %s"
|
|
1324
|
-
params.append(limit)
|
|
1325
|
-
rows = self.execute_returning(query, tuple(params))
|
|
1326
|
-
return [self._row_to_intent(row) for row in rows]
|
|
1327
|
-
|
|
1328
|
-
def claim_next_intent(
|
|
1329
|
-
self,
|
|
1330
|
-
claim_token: str,
|
|
1331
|
-
claimed_by: str | None = None,
|
|
1332
|
-
) -> TxIntent | None:
|
|
1333
|
-
"""Claim the next available intent using FOR UPDATE SKIP LOCKED."""
|
|
1334
|
-
row = self.execute_one(
|
|
1335
|
-
"""
|
|
1336
|
-
WITH claimed AS (
|
|
1337
|
-
SELECT intent_id FROM tx_intents
|
|
1338
|
-
WHERE status = 'created'
|
|
1339
|
-
AND (deadline_ts IS NULL OR deadline_ts > NOW())
|
|
1340
|
-
AND (retry_after IS NULL OR retry_after <= NOW())
|
|
1341
|
-
ORDER BY created_at ASC
|
|
1342
|
-
FOR UPDATE SKIP LOCKED
|
|
1343
|
-
LIMIT 1
|
|
1344
|
-
)
|
|
1345
|
-
UPDATE tx_intents
|
|
1346
|
-
SET status = 'claimed', claim_token = %s, claimed_at = NOW(),
|
|
1347
|
-
claimed_by = %s,
|
|
1348
|
-
retry_after = NULL, updated_at = NOW()
|
|
1349
|
-
WHERE intent_id = (SELECT intent_id FROM claimed)
|
|
1350
|
-
RETURNING *
|
|
1351
|
-
""",
|
|
1352
|
-
(claim_token, claimed_by),
|
|
1353
|
-
)
|
|
1354
|
-
if row:
|
|
1355
|
-
return self._row_to_intent(row)
|
|
1356
|
-
return None
|
|
1357
|
-
|
|
1358
|
-
def update_intent_status(
|
|
1359
|
-
self,
|
|
1360
|
-
intent_id: UUID,
|
|
1361
|
-
status: str,
|
|
1362
|
-
claim_token: str | None = None,
|
|
1363
|
-
) -> bool:
|
|
1364
|
-
if claim_token:
|
|
1365
|
-
result = self.execute_returning(
|
|
1366
|
-
"""
|
|
1367
|
-
UPDATE tx_intents SET status = %s, claim_token = %s,
|
|
1368
|
-
claimed_at = NOW(), updated_at = NOW()
|
|
1369
|
-
WHERE intent_id = %s
|
|
1370
|
-
RETURNING intent_id
|
|
1371
|
-
""",
|
|
1372
|
-
(status, claim_token, intent_id),
|
|
1373
|
-
)
|
|
1374
|
-
else:
|
|
1375
|
-
result = self.execute_returning(
|
|
1376
|
-
"""
|
|
1377
|
-
UPDATE tx_intents SET status = %s, updated_at = NOW()
|
|
1378
|
-
WHERE intent_id = %s
|
|
1379
|
-
RETURNING intent_id
|
|
1380
|
-
""",
|
|
1381
|
-
(status, intent_id),
|
|
1382
|
-
)
|
|
1383
|
-
return len(result) > 0
|
|
1384
|
-
|
|
1385
|
-
def update_intent_status_if(
|
|
1386
|
-
self,
|
|
1387
|
-
intent_id: UUID,
|
|
1388
|
-
status: str,
|
|
1389
|
-
expected_status: str | list[str],
|
|
1390
|
-
) -> bool:
|
|
1391
|
-
if isinstance(expected_status, str):
|
|
1392
|
-
expected_status = [expected_status]
|
|
1393
|
-
result = self.execute_returning(
|
|
1394
|
-
"""
|
|
1395
|
-
UPDATE tx_intents SET status = %s, updated_at = NOW()
|
|
1396
|
-
WHERE intent_id = %s AND status = ANY(%s)
|
|
1397
|
-
RETURNING intent_id
|
|
1398
|
-
""",
|
|
1399
|
-
(status, intent_id, expected_status),
|
|
1400
|
-
)
|
|
1401
|
-
return len(result) > 0
|
|
1402
|
-
|
|
1403
|
-
def transition_intent_status(
|
|
1404
|
-
self,
|
|
1405
|
-
intent_id: UUID,
|
|
1406
|
-
from_statuses: list[str],
|
|
1407
|
-
to_status: str,
|
|
1408
|
-
) -> tuple[bool, str | None]:
|
|
1409
|
-
"""Atomic status transition with conditional claim clearing."""
|
|
1410
|
-
# Single UPDATE that:
|
|
1411
|
-
# 1. Captures old status via FROM subquery (LIMIT 1 for safety)
|
|
1412
|
-
# 2. Only updates if status matches allowed from_statuses
|
|
1413
|
-
# 3. Clears claim fields only when old status was 'claimed' AND to_status != 'claimed'
|
|
1414
|
-
result = self.execute_returning(
|
|
1415
|
-
"""
|
|
1416
|
-
UPDATE tx_intents ti
|
|
1417
|
-
SET
|
|
1418
|
-
status = %s,
|
|
1419
|
-
updated_at = NOW(),
|
|
1420
|
-
claim_token = CASE
|
|
1421
|
-
WHEN old.status = 'claimed' AND %s != 'claimed'
|
|
1422
|
-
THEN NULL ELSE ti.claim_token
|
|
1423
|
-
END,
|
|
1424
|
-
claimed_at = CASE
|
|
1425
|
-
WHEN old.status = 'claimed' AND %s != 'claimed'
|
|
1426
|
-
THEN NULL ELSE ti.claimed_at
|
|
1427
|
-
END,
|
|
1428
|
-
claimed_by = CASE
|
|
1429
|
-
WHEN old.status = 'claimed' AND %s != 'claimed'
|
|
1430
|
-
THEN NULL ELSE ti.claimed_by
|
|
1431
|
-
END
|
|
1432
|
-
FROM (
|
|
1433
|
-
SELECT status FROM tx_intents
|
|
1434
|
-
WHERE intent_id = %s
|
|
1435
|
-
LIMIT 1
|
|
1436
|
-
) old
|
|
1437
|
-
WHERE ti.intent_id = %s
|
|
1438
|
-
AND ti.status = ANY(%s)
|
|
1439
|
-
RETURNING old.status AS old_status
|
|
1440
|
-
""",
|
|
1441
|
-
(to_status, to_status, to_status, to_status, intent_id, intent_id, from_statuses),
|
|
1442
|
-
)
|
|
1443
|
-
|
|
1444
|
-
if result:
|
|
1445
|
-
return (True, result[0]["old_status"])
|
|
1446
|
-
return (False, None)
|
|
1447
|
-
|
|
1448
|
-
def update_intent_signer(self, intent_id: UUID, signer_address: str) -> bool:
|
|
1449
|
-
result = self.execute_returning(
|
|
1450
|
-
"""
|
|
1451
|
-
UPDATE tx_intents SET signer_address = %s, updated_at = NOW()
|
|
1452
|
-
WHERE intent_id = %s
|
|
1453
|
-
RETURNING intent_id
|
|
1454
|
-
""",
|
|
1455
|
-
(signer_address.lower(), intent_id),
|
|
1456
|
-
)
|
|
1457
|
-
return len(result) > 0
|
|
1458
|
-
|
|
1459
|
-
def release_intent_claim(self, intent_id: UUID) -> bool:
|
|
1460
|
-
result = self.execute_returning(
|
|
1461
|
-
"""
|
|
1462
|
-
UPDATE tx_intents SET status = 'created', claim_token = NULL,
|
|
1463
|
-
claimed_at = NULL, updated_at = NOW()
|
|
1464
|
-
WHERE intent_id = %s AND status = 'claimed'
|
|
1465
|
-
RETURNING intent_id
|
|
1466
|
-
""",
|
|
1467
|
-
(intent_id,),
|
|
1468
|
-
)
|
|
1469
|
-
return len(result) > 0
|
|
1470
|
-
|
|
1471
|
-
def release_intent_claim_if_token(self, intent_id: UUID, claim_token: str) -> bool:
|
|
1472
|
-
rowcount = self.execute_returning_rowcount(
|
|
1473
|
-
"""
|
|
1474
|
-
UPDATE tx_intents
|
|
1475
|
-
SET status = 'created',
|
|
1476
|
-
claim_token = NULL,
|
|
1477
|
-
claimed_at = NULL,
|
|
1478
|
-
claimed_by = NULL,
|
|
1479
|
-
updated_at = NOW()
|
|
1480
|
-
WHERE intent_id = %s AND claim_token = %s AND status = 'claimed'
|
|
1481
|
-
""",
|
|
1482
|
-
(intent_id, claim_token),
|
|
1483
|
-
)
|
|
1484
|
-
return rowcount == 1
|
|
1485
|
-
|
|
1486
|
-
def clear_intent_claim(self, intent_id: UUID) -> bool:
|
|
1487
|
-
result = self.execute_returning(
|
|
1488
|
-
"""
|
|
1489
|
-
UPDATE tx_intents
|
|
1490
|
-
SET claim_token = NULL, claimed_at = NULL, updated_at = NOW()
|
|
1491
|
-
WHERE intent_id = %s
|
|
1492
|
-
RETURNING intent_id
|
|
1493
|
-
""",
|
|
1494
|
-
(intent_id,),
|
|
1495
|
-
)
|
|
1496
|
-
return len(result) > 0
|
|
1497
|
-
|
|
1498
|
-
def set_intent_retry_after(self, intent_id: UUID, retry_after: datetime | None) -> bool:
|
|
1499
|
-
result = self.execute_returning(
|
|
1500
|
-
"""
|
|
1501
|
-
UPDATE tx_intents
|
|
1502
|
-
SET retry_after = %s, updated_at = NOW()
|
|
1503
|
-
WHERE intent_id = %s
|
|
1504
|
-
RETURNING intent_id
|
|
1505
|
-
""",
|
|
1506
|
-
(retry_after, intent_id),
|
|
1507
|
-
)
|
|
1508
|
-
return len(result) > 0
|
|
1509
|
-
|
|
1510
|
-
def increment_intent_retry_count(self, intent_id: UUID) -> int:
|
|
1511
|
-
result = self.execute_returning(
|
|
1512
|
-
"""
|
|
1513
|
-
UPDATE tx_intents
|
|
1514
|
-
SET retry_count = retry_count + 1, updated_at = NOW()
|
|
1515
|
-
WHERE intent_id = %s
|
|
1516
|
-
RETURNING retry_count
|
|
1517
|
-
""",
|
|
1518
|
-
(intent_id,),
|
|
1519
|
-
)
|
|
1520
|
-
if not result:
|
|
1521
|
-
return 0
|
|
1522
|
-
return result[0]["retry_count"]
|
|
1523
|
-
|
|
1524
|
-
def release_stale_intent_claims(self, max_age_seconds: int) -> int:
|
|
1525
|
-
result = self.execute_returning(
|
|
1526
|
-
"""
|
|
1527
|
-
UPDATE tx_intents
|
|
1528
|
-
SET status = 'created', claim_token = NULL, claimed_at = NULL, updated_at = NOW()
|
|
1529
|
-
WHERE status = 'claimed'
|
|
1530
|
-
AND claimed_at < NOW() - INTERVAL '1 second' * %s
|
|
1531
|
-
AND NOT EXISTS (
|
|
1532
|
-
SELECT 1 FROM tx_attempts WHERE tx_attempts.intent_id = tx_intents.intent_id
|
|
1533
|
-
)
|
|
1534
|
-
RETURNING intent_id
|
|
1535
|
-
""",
|
|
1536
|
-
(max_age_seconds,),
|
|
1537
|
-
)
|
|
1538
|
-
return len(result)
|
|
1539
|
-
|
|
1540
|
-
def abandon_intent(self, intent_id: UUID) -> bool:
    """Mark an intent as abandoned.

    Thin delegate to update_intent_status; returns True if the intent
    existed and its status was changed.
    """
    return self.update_intent_status(intent_id, "abandoned")
|
|
1542
|
-
|
|
1543
|
-
def get_pending_intents_for_signer(
    self, chain_id: int, address: str
) -> list[TxIntent]:
    """Fetch in-flight ('sending' or 'pending') intents for one signer, oldest first."""
    pending: list[TxIntent] = []
    for record in self.execute_returning(
        """
        SELECT * FROM tx_intents
        WHERE chain_id = %s AND signer_address = %s
        AND status IN ('sending', 'pending')
        ORDER BY created_at
        """,
        (chain_id, address),
    ):
        pending.append(self._row_to_intent(record))
    return pending
|
|
1556
|
-
|
|
1557
|
-
# =========================================================================
|
|
1558
|
-
# Broadcast Binding Operations
|
|
1559
|
-
# =========================================================================
|
|
1560
|
-
|
|
1561
|
-
def get_broadcast_binding(self, intent_id: UUID) -> tuple[str | None, list[str]] | None:
    """Get binding if exists, None for first broadcast.

    Returns:
        Tuple of (group_name or None, endpoints) or None if not bound yet

    Raises:
        ValueError: If binding is corrupt (wrong type, empty)
    """
    record = self.execute_one(
        """
        SELECT broadcast_group, broadcast_endpoints_json
        FROM tx_intents
        WHERE intent_id = %s
        """,
        (intent_id,),
    )

    if not record:
        return None

    raw_endpoints = record["broadcast_endpoints_json"]
    if raw_endpoints is None:
        # Never bound: caller treats this as "first broadcast".
        return None

    # Decode and validate the persisted endpoint snapshot.
    endpoints = json.loads(raw_endpoints)
    if not isinstance(endpoints, list):
        raise ValueError(
            f"Corrupt binding for intent {intent_id}: "
            f"endpoints_json is {type(endpoints).__name__}, expected list"
        )
    if not endpoints:
        raise ValueError(
            f"Corrupt binding for intent {intent_id}: endpoints list is empty"
        )
    for endpoint in endpoints:
        if not isinstance(endpoint, str):
            raise ValueError(
                f"Corrupt binding for intent {intent_id}: endpoints contains non-string"
            )

    return record["broadcast_group"], endpoints
|
|
1605
|
-
|
|
1606
|
-
# =========================================================================
|
|
1607
|
-
# Attempt Operations
|
|
1608
|
-
# =========================================================================
|
|
1609
|
-
|
|
1610
|
-
def create_attempt(
    self,
    attempt_id: UUID,
    intent_id: UUID,
    nonce: int,
    gas_params_json: str,
    status: str = "signed",
    tx_hash: str | None = None,
    replaces_attempt_id: UUID | None = None,
    broadcast_group: str | None = None,
    endpoint_url: str | None = None,
    binding: tuple[str | None, list[str]] | None = None,
) -> TxAttempt:
    """Create attempt, optionally setting binding atomically.

    Args:
        binding: If provided (first broadcast), persist binding atomically.
            Tuple of (group_name or None, endpoints)

    Returns:
        The newly inserted attempt, hydrated from the RETURNING row.

    Raises:
        ValueError: If the intent is missing, already bound, or a
            concurrent binding update is detected.
        DatabaseError: If the attempt INSERT unexpectedly returns no row.

    CRITICAL: Uses WHERE broadcast_endpoints_json IS NULL to prevent overwrites.
    """
    # SERIALIZABLE so the row lock, guarded binding UPDATE, and attempt
    # INSERT commit as one unit; concurrent binders error out rather than
    # double-binding the intent.
    with self.transaction(isolation_level=IsolationLevel.SERIALIZABLE):
        if binding is not None:
            # First broadcast: check existence + binding state for clear error messages
            row = self.execute_one(
                "SELECT broadcast_endpoints_json FROM tx_intents WHERE intent_id = %s FOR UPDATE",
                (intent_id,),
            )

            if not row:
                raise ValueError(f"Intent {intent_id} not found")
            if row["broadcast_endpoints_json"] is not None:
                raise ValueError(
                    f"Intent {intent_id} already bound. "
                    f"Cannot rebind — may indicate race condition."
                )

            group_name, endpoints = binding
            # Defensive copy
            endpoints_snapshot = list(endpoints)

            # Update with WHERE guard
            updated = self.execute_one(
                """
                UPDATE tx_intents
                SET broadcast_group = %s,
                    broadcast_endpoints_json = %s,
                    updated_at = NOW()
                WHERE intent_id = %s
                AND broadcast_endpoints_json IS NULL
                RETURNING intent_id
                """,
                (group_name, json.dumps(endpoints_snapshot), intent_id),
            )

            if not updated:
                # The IS NULL guard failed even though we hold the row lock
                # — treat as a race and abort the whole transaction.
                raise ValueError(
                    f"Binding race condition for intent {intent_id}: "
                    f"concurrent update detected"
                )

        # Create attempt with broadcast audit fields
        row = self.execute_one(
            """
            INSERT INTO tx_attempts (
                attempt_id, intent_id, nonce, gas_params_json, status,
                tx_hash, replaces_attempt_id, broadcast_group, endpoint_url
            ) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)
            RETURNING *
            """,
            (
                attempt_id,
                intent_id,
                nonce,
                gas_params_json,
                status,
                tx_hash,
                replaces_attempt_id,
                broadcast_group,
                endpoint_url,
            ),
        )
        if not row:
            raise DatabaseError("Failed to create attempt")
        return self._row_to_attempt(row)
|
|
1695
|
-
|
|
1696
|
-
def get_attempt(self, attempt_id: UUID) -> TxAttempt | None:
    """Fetch a single attempt by primary key; None if absent."""
    record = self.execute_one(
        "SELECT * FROM tx_attempts WHERE attempt_id = %s",
        (attempt_id,),
    )
    return self._row_to_attempt(record) if record else None
|
|
1704
|
-
|
|
1705
|
-
def get_attempts_for_intent(self, intent_id: UUID) -> list[TxAttempt]:
    """List every attempt made for an intent, oldest first."""
    attempts: list[TxAttempt] = []
    for record in self.execute_returning(
        "SELECT * FROM tx_attempts WHERE intent_id = %s ORDER BY created_at",
        (intent_id,),
    ):
        attempts.append(self._row_to_attempt(record))
    return attempts
|
|
1711
|
-
|
|
1712
|
-
def get_latest_attempt_for_intent(self, intent_id: UUID) -> TxAttempt | None:
    """Fetch the most recently created attempt for an intent; None if there are none."""
    record = self.execute_one(
        """
        SELECT * FROM tx_attempts WHERE intent_id = %s
        ORDER BY created_at DESC LIMIT 1
        """,
        (intent_id,),
    )
    return self._row_to_attempt(record) if record else None
|
|
1723
|
-
|
|
1724
|
-
def get_attempt_by_tx_hash(self, tx_hash: str) -> TxAttempt | None:
    """Look up an attempt by its transaction hash; None if no match."""
    record = self.execute_one(
        "SELECT * FROM tx_attempts WHERE tx_hash = %s",
        (tx_hash,),
    )
    return self._row_to_attempt(record) if record else None
|
|
1732
|
-
|
|
1733
|
-
def _row_to_attempt(self, row: dict[str, Any]) -> TxAttempt:
    """Hydrate a TxAttempt from a tx_attempts row dict.

    Columns added in later schema versions are read with row.get() so
    rows from older databases (missing those keys) still hydrate.
    """
    return TxAttempt(
        attempt_id=row["attempt_id"],
        intent_id=row["intent_id"],
        nonce=row["nonce"],
        tx_hash=row["tx_hash"],
        # gas params are stored as JSON text and decoded here
        gas_params=GasParams.from_json(row["gas_params_json"]),
        status=AttemptStatus(row["status"]),
        error_code=row["error_code"],
        error_detail=row["error_detail"],
        replaces_attempt_id=row["replaces_attempt_id"],
        broadcast_block=row["broadcast_block"],
        broadcast_at=row.get("broadcast_at"),
        included_block=row.get("included_block"),
        created_at=row["created_at"],
        updated_at=row["updated_at"],
        # Audit trail (may be None for older attempts)
        broadcast_group=row.get("broadcast_group"),
        endpoint_url=row.get("endpoint_url"),
    )
|
|
1753
|
-
|
|
1754
|
-
def update_attempt_status(
    self,
    attempt_id: UUID,
    status: str,
    tx_hash: str | None = None,
    broadcast_block: int | None = None,
    broadcast_at: datetime | None = None,
    included_block: int | None = None,
    error_code: str | None = None,
    error_detail: str | None = None,
) -> bool:
    """Update an attempt's status plus any provided optional columns.

    Only columns whose argument is not None are written; a None argument
    means "leave unchanged" (there is no way to NULL a column here).

    Returns:
        True if the attempt existed and was updated.

    BUG FIX: the previous version emitted asyncpg-style ``$n`` placeholders
    (``tx_hash = $2`` …) while binding a positional tuple through the same
    psycopg-style ``%s`` API used everywhere else in this class, so any call
    that supplied an optional field produced invalid SQL parameters. All
    placeholders are now ``%s``; positional order is preserved by appending
    to ``updates`` and ``params`` in lockstep.
    """
    updates = ["status = %s", "updated_at = NOW()"]
    params: list[Any] = [status]

    # Column -> value pairs for the optional fields; order here fixes the
    # positional order of the bound parameters.
    optional_columns = (
        ("tx_hash", tx_hash),
        ("broadcast_block", broadcast_block),
        ("broadcast_at", broadcast_at),
        ("included_block", included_block),
        ("error_code", error_code),
        ("error_detail", error_detail),
    )
    for column, value in optional_columns:
        if value is not None:
            updates.append(f"{column} = %s")
            params.append(value)

    params.append(attempt_id)
    query = f"UPDATE tx_attempts SET {', '.join(updates)} WHERE attempt_id = %s RETURNING attempt_id"
    result = self.execute_returning(query, tuple(params))
    return len(result) > 0
|
|
1799
|
-
|
|
1800
|
-
# =========================================================================
|
|
1801
|
-
# Transaction Operations (NEW - replaces Intent/Attempt in Phase 2+)
|
|
1802
|
-
#
|
|
1803
|
-
# IMPORTANT: Transaction is the only durable execution model.
|
|
1804
|
-
# Do not add attempt-related methods here.
|
|
1805
|
-
# =========================================================================
|
|
1806
|
-
|
|
1807
|
-
def create_tx(
    self,
    tx_id: UUID,
    job_id: str,
    chain_id: int,
    idempotency_key: str,
    signer_address: str,
    to_address: str,
    data: str | None,
    value_wei: str,
    min_confirmations: int,
    deadline_ts: datetime | None,
    gas_params: GasParams | None = None,
) -> Transaction | None:
    """Create a new transaction.

    Inserted with status='created' and replacement_count=0.

    Returns None if idempotency_key already exists (idempotency).
    """
    # gas params are optional at creation; serialize to JSON text if given
    gas_params_json = gas_params.to_json() if gas_params else None
    try:
        result = self.execute_returning(
            """
            INSERT INTO transactions (
                tx_id, job_id, chain_id, idempotency_key,
                signer_address, to_address, data, value_wei,
                min_confirmations, deadline_ts, status,
                replacement_count, gas_params_json
            ) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 'created', 0, %s)
            ON CONFLICT (chain_id, signer_address, idempotency_key) DO NOTHING
            RETURNING *
            """,
            (
                tx_id,
                job_id,
                chain_id,
                idempotency_key,
                signer_address,
                to_address,
                data,
                value_wei,
                min_confirmations,
                deadline_ts,
                gas_params_json,
            ),
        )
        # DO NOTHING on the idempotency conflict yields zero rows
        if not result:
            return None
        return self._row_to_transaction(result[0])
    except psycopg.errors.UniqueViolation:
        # Safety net for unique constraints not covered by the ON CONFLICT
        # target (presumably the tx_id primary key — TODO confirm).
        return None
|
|
1857
|
-
|
|
1858
|
-
def get_tx(self, tx_id: UUID) -> Transaction | None:
    """Fetch a transaction by ID; None if absent."""
    record = self.execute_one(
        "SELECT * FROM transactions WHERE tx_id = %s",
        (tx_id,),
    )
    return self._row_to_transaction(record) if record else None
|
|
1867
|
-
|
|
1868
|
-
def get_tx_by_idempotency_key(
    self,
    chain_id: int,
    signer_address: str,
    idempotency_key: str,
) -> Transaction | None:
    """Fetch a transaction by idempotency key (scoped to chain and signer).

    The signer address is lowercased before the lookup to match the
    stored representation.
    """
    record = self.execute_one(
        "SELECT * FROM transactions WHERE chain_id = %s AND signer_address = %s AND idempotency_key = %s",
        (chain_id, signer_address.lower(), idempotency_key),
    )
    return self._row_to_transaction(record) if record else None
|
|
1882
|
-
|
|
1883
|
-
def get_tx_by_hash(self, tx_hash: str) -> Transaction | None:
    """Fetch a transaction by its current tx hash.

    NOTE: Does NOT search tx_hash_history. Only matches current_tx_hash,
    so a hash superseded by a replacement will not be found here.
    """
    record = self.execute_one(
        "SELECT * FROM transactions WHERE current_tx_hash = %s",
        (tx_hash,),
    )
    return self._row_to_transaction(record) if record else None
|
|
1895
|
-
|
|
1896
|
-
def list_pending_txs(
    self,
    chain_id: int | None = None,
    job_id: str | None = None,
) -> list[Transaction]:
    """List transactions in CREATED or BROADCAST status, oldest first.

    Args:
        chain_id: Optional filter on chain.
        job_id: Optional filter on owning job.

    BUG FIX: the previous version appended asyncpg-style ``$n`` placeholders
    to the query while binding a positional tuple through psycopg's ``%s``
    convention (used by every other query in this class), so filtering by
    chain_id or job_id produced invalid SQL parameters. Placeholders are
    now ``%s`` and the redundant index counter is gone.
    """
    query = "SELECT * FROM transactions WHERE status IN ('created', 'broadcast')"
    params: list[Any] = []

    if chain_id is not None:
        query += " AND chain_id = %s"
        params.append(chain_id)
    if job_id is not None:
        query += " AND job_id = %s"
        params.append(job_id)

    query += " ORDER BY created_at ASC"
    rows = self.execute_returning(query, tuple(params) if params else None)
    return [self._row_to_transaction(row) for row in rows]
|
|
1918
|
-
|
|
1919
|
-
def claim_tx(self, claim_token: str) -> Transaction | None:
    """Claim the next CREATED transaction for processing.

    Status remains CREATED while claimed - no "claimed" status.
    Uses FOR UPDATE SKIP LOCKED for non-blocking claim, so concurrent
    claimers each pick a different row (or get None).
    """
    claimed = self.execute_returning(
        """
        UPDATE transactions
        SET claim_token = %s, claimed_at = NOW(), updated_at = NOW()
        WHERE tx_id = (
            SELECT tx_id FROM transactions
            WHERE status = 'created'
            AND claim_token IS NULL
            AND (deadline_ts IS NULL OR deadline_ts > NOW())
            ORDER BY created_at ASC, tx_id ASC
            FOR UPDATE SKIP LOCKED
            LIMIT 1
        )
        RETURNING *
        """,
        (claim_token,),
    )
    return self._row_to_transaction(claimed[0]) if claimed else None
|
|
1945
|
-
|
|
1946
|
-
def set_tx_broadcast(
    self,
    tx_id: UUID,
    tx_hash: str,
    nonce: int,
    gas_params: GasParams,
    broadcast_block: int,
    broadcast_info: BroadcastInfo | None = None,
) -> bool:
    """Record initial broadcast.

    Sets status=BROADCAST, creates first tx_hash_history record.
    Guarded by status='created' so a double-broadcast updates nothing.

    Returns:
        True if the row was in 'created' and transitioned to 'broadcast'.
    """
    broadcast_time = datetime.now(timezone.utc)

    # Seed the hash-history audit trail with this first broadcast.
    first_record = TxHashRecord(
        tx_hash=tx_hash,
        nonce=nonce,
        broadcast_at=broadcast_time.isoformat(),
        broadcast_block=broadcast_block,
        gas_limit=gas_params.gas_limit,
        max_fee_per_gas=gas_params.max_fee_per_gas,
        max_priority_fee_per_gas=gas_params.max_priority_fee_per_gas,
        reason="initial",
        outcome=None,
    )
    history_json = json.dumps([first_record.to_dict()])
    info_json = broadcast_info.to_json() if broadcast_info else None

    updated = self.execute_returning(
        """
        UPDATE transactions
        SET status = 'broadcast',
            current_tx_hash = %s,
            current_nonce = %s,
            gas_params_json = %s,
            broadcast_info_json = %s,
            tx_hash_history = %s,
            updated_at = NOW()
        WHERE tx_id = %s
        AND status = 'created'
        RETURNING tx_id
        """,
        (
            tx_hash,
            nonce,
            gas_params.to_json(),
            info_json,
            history_json,
            tx_id,
        ),
    )
    return bool(updated)
|
|
1999
|
-
|
|
2000
|
-
def set_tx_replaced(
    self,
    tx_id: UUID,
    new_tx_hash: str,
    gas_params: GasParams,
    broadcast_block: int,
    reason: str = "fee_bump",
) -> bool:
    """Record replacement broadcast.

    Appends to tx_hash_history, updates current_tx_hash, increments
    replacement_count. Status remains BROADCAST.

    Returns:
        True if the row was in 'broadcast' and was updated.

    NOTE(review): this is a read-modify-write across two statements with
    no row lock between them — confirm concurrent replacements of the
    same tx cannot interleave and lose a history entry.
    """
    now = datetime.now(timezone.utc)

    # First, get current state
    row = self.execute_one(
        "SELECT current_nonce, tx_hash_history FROM transactions WHERE tx_id = %s AND status = 'broadcast'",
        (tx_id,),
    )
    if not row:
        return False

    # Replacement reuses the same nonce as the tx it replaces.
    nonce = row["current_nonce"]
    existing_history = json.loads(row["tx_hash_history"]) if row["tx_hash_history"] else []

    # Mark previous entry as replaced
    if existing_history:
        existing_history[-1]["outcome"] = "replaced"

    # Add new history record
    new_record = TxHashRecord(
        tx_hash=new_tx_hash,
        nonce=nonce,
        broadcast_at=now.isoformat(),
        broadcast_block=broadcast_block,
        gas_limit=gas_params.gas_limit,
        max_fee_per_gas=gas_params.max_fee_per_gas,
        max_priority_fee_per_gas=gas_params.max_priority_fee_per_gas,
        reason=reason,
        outcome=None,
    )
    existing_history.append(new_record.to_dict())

    result = self.execute_returning(
        """
        UPDATE transactions
        SET current_tx_hash = %s,
            gas_params_json = %s,
            tx_hash_history = %s,
            replacement_count = replacement_count + 1,
            updated_at = NOW()
        WHERE tx_id = %s
        AND status = 'broadcast'
        RETURNING tx_id
        """,
        (
            new_tx_hash,
            gas_params.to_json(),
            json.dumps(existing_history),
            tx_id,
        ),
    )
    return len(result) > 0
|
|
2064
|
-
|
|
2065
|
-
def set_tx_confirmed(
    self,
    tx_id: UUID,
    included_block: int,
) -> bool:
    """Mark transaction confirmed.

    Sets status=CONFIRMED, included_block, confirmed_at.
    Updates tx_hash_history with outcome.

    Returns:
        True if the row was in 'broadcast' and transitioned to 'confirmed'.
    """
    now = datetime.now(timezone.utc)

    # Get and update history
    row = self.execute_one(
        "SELECT tx_hash_history FROM transactions WHERE tx_id = %s AND status = 'broadcast'",
        (tx_id,),
    )
    if not row:
        return False

    existing_history = json.loads(row["tx_hash_history"]) if row["tx_hash_history"] else []
    # The last history entry is the currently-live hash; stamp its outcome.
    if existing_history:
        existing_history[-1]["outcome"] = "confirmed"

    # The status='broadcast' guard re-checks the state read above so a
    # concurrent terminal transition makes this UPDATE a no-op.
    result = self.execute_returning(
        """
        UPDATE transactions
        SET status = 'confirmed',
            included_block = %s,
            confirmed_at = %s,
            tx_hash_history = %s,
            updated_at = NOW()
        WHERE tx_id = %s
        AND status = 'broadcast'
        RETURNING tx_id
        """,
        (
            included_block,
            now,
            json.dumps(existing_history),
            tx_id,
        ),
    )
    return len(result) > 0
|
|
2109
|
-
|
|
2110
|
-
def set_tx_failed(
    self,
    tx_id: UUID,
    failure_type: FailureType,
    error_info: ErrorInfo | None = None,
) -> bool:
    """Mark transaction failed.

    Sets status=FAILED, failure_type, error_info_json.
    Updates tx_hash_history with outcome if applicable.

    Accepts rows in either 'created' or 'broadcast'; a tx can fail
    before it was ever broadcast.

    Returns:
        True if the row was in a non-terminal state and was updated.
    """
    # Serialize error_info
    error_info_json = None
    if error_info:
        error_info_json = json.dumps({
            "error_type": error_info.error_type,
            "message": error_info.message,
            "code": error_info.code,
        })

    # Get and update history if broadcast
    row = self.execute_one(
        "SELECT status, tx_hash_history FROM transactions WHERE tx_id = %s AND status IN ('created', 'broadcast')",
        (tx_id,),
    )
    if not row:
        return False

    existing_history = json.loads(row["tx_hash_history"]) if row["tx_hash_history"] else []
    # Only a broadcast tx has a live hash whose outcome needs stamping.
    if existing_history and row["status"] == "broadcast":
        existing_history[-1]["outcome"] = "failed"

    result = self.execute_returning(
        """
        UPDATE transactions
        SET status = 'failed',
            failure_type = %s,
            error_info_json = %s,
            tx_hash_history = %s,
            updated_at = NOW()
        WHERE tx_id = %s
        AND status IN ('created', 'broadcast')
        RETURNING tx_id
        """,
        (
            failure_type.value,
            error_info_json,
            # Keep the column NULL (not '[]') when there was never a history.
            json.dumps(existing_history) if existing_history else None,
            tx_id,
        ),
    )
    return len(result) > 0
|
|
2162
|
-
|
|
2163
|
-
def release_stale_tx_claims(self, max_age_seconds: int) -> int:
    """Release claims older than threshold. 0 = release all claims.

    Only rows still in 'created' are touched (claiming does not change
    status), so releasing a claim simply makes the tx claimable again.

    Returns:
        Number of claims released.

    BUG FIX: the age filter previously read ``INTERVAL '%s seconds'``,
    which embeds the placeholder inside a quoted string literal — psycopg's
    server-side parameter binding cannot substitute there. Rewritten as
    ``INTERVAL '1 second' * %s``, the form every other age filter in this
    class already uses.
    """
    if max_age_seconds == 0:
        # Release ALL claims
        result = self.execute_returning(
            """
            UPDATE transactions
            SET claim_token = NULL, claimed_at = NULL, updated_at = NOW()
            WHERE status = 'created'
            AND claim_token IS NOT NULL
            RETURNING tx_id
            """
        )
    else:
        result = self.execute_returning(
            """
            UPDATE transactions
            SET claim_token = NULL, claimed_at = NULL, updated_at = NOW()
            WHERE status = 'created'
            AND claim_token IS NOT NULL
            AND claimed_at < NOW() - INTERVAL '1 second' * %s
            RETURNING tx_id
            """,
            (max_age_seconds,),
        )
    return len(result)
|
|
2189
|
-
|
|
2190
|
-
def _row_to_transaction(self, row: dict[str, Any]) -> Transaction:
    """Convert database row to Transaction object."""
    tx_id = row["tx_id"]
    # Some drivers/configs hand back UUIDs as strings; normalize.
    if isinstance(tx_id, str):
        tx_id = UUID(tx_id)

    # Parse failure_type if present
    failure_type = None
    if row.get("failure_type"):
        failure_type = FailureType(row["failure_type"])

    return Transaction(
        tx_id=tx_id,
        job_id=row["job_id"],
        chain_id=row["chain_id"],
        idempotency_key=row["idempotency_key"],
        signer_address=row["signer_address"],
        to_address=row["to_address"],
        data=row["data"],
        value_wei=row["value_wei"],
        min_confirmations=row["min_confirmations"],
        deadline_ts=row["deadline_ts"],
        status=TxStatus(row["status"]),
        failure_type=failure_type,
        current_tx_hash=row["current_tx_hash"],
        current_nonce=row["current_nonce"],
        replacement_count=row["replacement_count"],
        claim_token=row["claim_token"],
        claimed_at=row["claimed_at"],
        included_block=row["included_block"],
        confirmed_at=row["confirmed_at"],
        created_at=row["created_at"],
        updated_at=row["updated_at"],
        # JSON columns are passed through as raw text; callers decode lazily.
        gas_params_json=row["gas_params_json"],
        broadcast_info_json=row["broadcast_info_json"],
        error_info_json=row["error_info_json"],
        tx_hash_history=row["tx_hash_history"],
    )
|
|
2228
|
-
|
|
2229
|
-
# =========================================================================
|
|
2230
|
-
# ABI Cache Operations
|
|
2231
|
-
# =========================================================================
|
|
2232
|
-
|
|
2233
|
-
def get_cached_abi(self, chain_id: int, address: str) -> ABICacheEntry | None:
    """Look up a cached ABI for (chain_id, address); None on cache miss."""
    cached = self.execute_one(
        "SELECT * FROM abi_cache WHERE chain_id = %s AND address = %s",
        (chain_id, address),
    )
    if not cached:
        return None
    return ABICacheEntry(
        chain_id=cached["chain_id"],
        address=cached["address"],
        abi_json=cached["abi_json"],
        source=cached["source"],
        resolved_at=cached["resolved_at"],
    )
|
|
2247
|
-
|
|
2248
|
-
def set_cached_abi(
    self,
    chain_id: int,
    address: str,
    abi_json: str,
    source: str,
) -> None:
    """Insert or refresh a cached ABI (upsert keyed on chain_id + address).

    On conflict the existing entry is overwritten and resolved_at bumped.
    """
    self.execute(
        """
        INSERT INTO abi_cache (chain_id, address, abi_json, source)
        VALUES (%s, %s, %s, %s)
        ON CONFLICT(chain_id, address) DO UPDATE SET
            abi_json = EXCLUDED.abi_json,
            source = EXCLUDED.source,
            resolved_at = NOW()
        """,
        (chain_id, address, abi_json, source),
    )
|
|
2266
|
-
|
|
2267
|
-
def clear_cached_abi(self, chain_id: int, address: str) -> bool:
    """Evict one ABI cache entry; True if an entry was actually deleted."""
    deleted = self.execute_returning(
        "DELETE FROM abi_cache WHERE chain_id = %s AND address = %s RETURNING chain_id",
        (chain_id, address),
    )
    return bool(deleted)
|
|
2273
|
-
|
|
2274
|
-
def cleanup_expired_abis(self, max_age_seconds: int) -> int:
    """Delete ABI cache entries older than max_age_seconds; returns count deleted."""
    expired = self.execute_returning(
        """
        DELETE FROM abi_cache
        WHERE resolved_at < NOW() - INTERVAL '1 second' * %s
        RETURNING chain_id
        """,
        (max_age_seconds,),
    )
    return len(expired)
|
|
2284
|
-
|
|
2285
|
-
# =========================================================================
|
|
2286
|
-
# Proxy Cache Operations
|
|
2287
|
-
# =========================================================================
|
|
2288
|
-
|
|
2289
|
-
def get_cached_proxy(
    self, chain_id: int, proxy_address: str
) -> ProxyCacheEntry | None:
    """Look up a cached proxy→implementation mapping; None on cache miss."""
    cached = self.execute_one(
        "SELECT * FROM proxy_cache WHERE chain_id = %s AND proxy_address = %s",
        (chain_id, proxy_address),
    )
    if not cached:
        return None
    return ProxyCacheEntry(
        chain_id=cached["chain_id"],
        proxy_address=cached["proxy_address"],
        implementation_address=cached["implementation_address"],
        resolved_at=cached["resolved_at"],
    )
|
|
2304
|
-
|
|
2305
|
-
def set_cached_proxy(
    self,
    chain_id: int,
    proxy_address: str,
    implementation_address: str,
) -> None:
    """Insert or refresh a proxy→implementation mapping (upsert on chain_id + proxy).

    On conflict the implementation is overwritten and resolved_at bumped.
    """
    self.execute(
        """
        INSERT INTO proxy_cache (chain_id, proxy_address, implementation_address)
        VALUES (%s, %s, %s)
        ON CONFLICT(chain_id, proxy_address) DO UPDATE SET
            implementation_address = EXCLUDED.implementation_address,
            resolved_at = NOW()
        """,
        (chain_id, proxy_address, implementation_address),
    )
|
|
2321
|
-
|
|
2322
|
-
def clear_cached_proxy(self, chain_id: int, proxy_address: str) -> bool:
    """Evict one proxy cache entry; True if an entry was actually deleted."""
    deleted = self.execute_returning(
        "DELETE FROM proxy_cache WHERE chain_id = %s AND proxy_address = %s RETURNING chain_id",
        (chain_id, proxy_address),
    )
    return bool(deleted)
|
|
2328
|
-
|
|
2329
|
-
# =========================================================================
|
|
2330
|
-
# Cleanup & Maintenance
|
|
2331
|
-
# =========================================================================
|
|
2332
|
-
|
|
2333
|
-
def cleanup_old_intents(
    self,
    older_than_days: int,
    statuses: list[str] | None = None,
) -> int:
    """Delete terminal intents created before the age cutoff.

    Args:
        older_than_days: Age threshold in days.
        statuses: Statuses eligible for deletion; defaults to the
            terminal set (confirmed, failed, abandoned).

    Returns:
        Number of intents deleted.
    """
    terminal = statuses if statuses is not None else ["confirmed", "failed", "abandoned"]

    deleted = self.execute_returning(
        """
        DELETE FROM tx_intents
        WHERE status = ANY(%s)
        AND created_at < NOW() - INTERVAL '1 day' * %s
        RETURNING intent_id
        """,
        (terminal, older_than_days),
    )
    return len(deleted)
|
|
2351
|
-
|
|
2352
|
-
def get_database_stats(self) -> dict[str, Any]:
    """Get database statistics for health checks.

    Collects intent counts by status, job totals, per-chain block state,
    and (when a pool is configured) connection-pool usage.
    """
    stats: dict[str, Any] = {"type": "postgresql"}

    # Count intents by status
    status_rows = self.execute_returning(
        "SELECT status, COUNT(*) as count FROM tx_intents GROUP BY status"
    )
    stats["intents_by_status"] = {r["status"]: r["count"] for r in status_rows}

    # Count total jobs
    total_row = self.execute_one("SELECT COUNT(*) as count FROM jobs")
    stats["total_jobs"] = total_row["count"] if total_row else 0

    # Count enabled jobs
    enabled_row = self.execute_one("SELECT COUNT(*) as count FROM jobs WHERE enabled = true")
    stats["enabled_jobs"] = enabled_row["count"] if enabled_row else 0

    # Get block state per chain
    block_states = []
    for state in self.execute_returning("SELECT * FROM block_state"):
        block_states.append(
            {
                "chain_id": state["chain_id"],
                "last_block": state["last_processed_block_number"],
            }
        )
    stats["block_states"] = block_states

    # Pool statistics if available
    if self._pool:
        stats["pool"] = {
            "size": self._pool.get_size(),
            "free": self._pool.get_idle_size(),
            "min": self._pool.get_min_size(),
            "max": self._pool.get_max_size(),
        }

    return stats
|
|
2390
|
-
|
|
2391
|
-
# =========================================================================
|
|
2392
|
-
# Reconciliation Operations
|
|
2393
|
-
# =========================================================================
|
|
2394
|
-
|
|
2395
|
-
def clear_orphaned_claims(self, chain_id: int, older_than_minutes: int = 2) -> int:
|
|
2396
|
-
"""Clear claim fields where status != 'claimed' and claim is stale."""
|
|
2397
|
-
result = self.execute_returning(
|
|
2398
|
-
"""
|
|
2399
|
-
UPDATE tx_intents
|
|
2400
|
-
SET claim_token = NULL,
|
|
2401
|
-
claimed_at = NULL,
|
|
2402
|
-
claimed_by = NULL,
|
|
2403
|
-
updated_at = NOW()
|
|
2404
|
-
WHERE chain_id = %s
|
|
2405
|
-
AND status != 'claimed'
|
|
2406
|
-
AND claim_token IS NOT NULL
|
|
2407
|
-
AND claimed_at IS NOT NULL
|
|
2408
|
-
AND claimed_at < NOW() - make_interval(mins => %s)
|
|
2409
|
-
RETURNING intent_id
|
|
2410
|
-
""",
|
|
2411
|
-
(chain_id, older_than_minutes),
|
|
2412
|
-
)
|
|
2413
|
-
return len(result)
|
|
2414
|
-
|
|
2415
|
-
def release_orphaned_nonces(self, chain_id: int, older_than_minutes: int = 5) -> int:
|
|
2416
|
-
"""Release nonces for terminal intents that are stale."""
|
|
2417
|
-
# Only release 'reserved' (not 'in_flight' - that's scary without receipt check)
|
|
2418
|
-
# Guard on both intent.updated_at AND reservation.updated_at for safety
|
|
2419
|
-
result = self.execute_returning(
|
|
2420
|
-
"""
|
|
2421
|
-
UPDATE nonce_reservations nr
|
|
2422
|
-
SET status = 'released',
|
|
2423
|
-
updated_at = NOW()
|
|
2424
|
-
FROM tx_intents ti
|
|
2425
|
-
WHERE nr.intent_id = ti.intent_id
|
|
2426
|
-
AND nr.chain_id = %s
|
|
2427
|
-
AND nr.status = 'reserved'
|
|
2428
|
-
AND ti.status IN ('failed', 'abandoned', 'reverted')
|
|
2429
|
-
AND ti.updated_at < NOW() - make_interval(mins => %s)
|
|
2430
|
-
AND nr.updated_at < NOW() - make_interval(mins => %s)
|
|
2431
|
-
RETURNING nr.nonce
|
|
2432
|
-
""",
|
|
2433
|
-
(chain_id, older_than_minutes, older_than_minutes),
|
|
2434
|
-
)
|
|
2435
|
-
return len(result)
|
|
2436
|
-
|
|
2437
|
-
def count_pending_without_attempts(self, chain_id: int) -> int:
|
|
2438
|
-
"""Count pending intents with no attempt records (integrity issue)."""
|
|
2439
|
-
result = self.execute_one(
|
|
2440
|
-
"""
|
|
2441
|
-
SELECT COUNT(*) as count
|
|
2442
|
-
FROM tx_intents ti
|
|
2443
|
-
LEFT JOIN tx_attempts ta ON ti.intent_id = ta.intent_id
|
|
2444
|
-
WHERE ti.chain_id = %s
|
|
2445
|
-
AND ti.status = 'pending'
|
|
2446
|
-
AND ta.attempt_id IS NULL
|
|
2447
|
-
""",
|
|
2448
|
-
(chain_id,),
|
|
2449
|
-
)
|
|
2450
|
-
return result["count"] if result else 0
|
|
2451
|
-
|
|
2452
|
-
def count_stale_claims(self, chain_id: int, older_than_minutes: int = 10) -> int:
|
|
2453
|
-
"""Count intents stuck in CLAIMED for too long."""
|
|
2454
|
-
result = self.execute_one(
|
|
2455
|
-
"""
|
|
2456
|
-
SELECT COUNT(*) as count
|
|
2457
|
-
FROM tx_intents
|
|
2458
|
-
WHERE chain_id = %s
|
|
2459
|
-
AND status = 'claimed'
|
|
2460
|
-
AND claimed_at IS NOT NULL
|
|
2461
|
-
AND claimed_at < NOW() - make_interval(mins => %s)
|
|
2462
|
-
""",
|
|
2463
|
-
(chain_id, older_than_minutes),
|
|
2464
|
-
)
|
|
2465
|
-
return result["count"] if result else 0
|
|
2466
|
-
|
|
2467
|
-
# =========================================================================
|
|
2468
|
-
# Invariant Queries (Phase 2)
|
|
2469
|
-
# =========================================================================
|
|
2470
|
-
|
|
2471
|
-
def count_stuck_claimed(self, chain_id: int, older_than_minutes: int = 10) -> int:
|
|
2472
|
-
"""Count intents stuck in CLAIMED status for too long."""
|
|
2473
|
-
row = self.execute_one(
|
|
2474
|
-
"""
|
|
2475
|
-
SELECT COUNT(*) as count
|
|
2476
|
-
FROM tx_intents
|
|
2477
|
-
WHERE chain_id = %s
|
|
2478
|
-
AND status = 'claimed'
|
|
2479
|
-
AND claimed_at < NOW() - make_interval(mins => %s)
|
|
2480
|
-
""",
|
|
2481
|
-
(chain_id, older_than_minutes),
|
|
2482
|
-
)
|
|
2483
|
-
return row["count"] if row else 0
|
|
2484
|
-
|
|
2485
|
-
def count_orphaned_claims(self, chain_id: int) -> int:
|
|
2486
|
-
"""Count intents with claim_token set but status != claimed."""
|
|
2487
|
-
row = self.execute_one(
|
|
2488
|
-
"""
|
|
2489
|
-
SELECT COUNT(*) as count
|
|
2490
|
-
FROM tx_intents
|
|
2491
|
-
WHERE chain_id = %s
|
|
2492
|
-
AND status != 'claimed'
|
|
2493
|
-
AND claim_token IS NOT NULL
|
|
2494
|
-
""",
|
|
2495
|
-
(chain_id,),
|
|
2496
|
-
)
|
|
2497
|
-
return row["count"] if row else 0
|
|
2498
|
-
|
|
2499
|
-
def count_orphaned_nonces(self, chain_id: int) -> int:
|
|
2500
|
-
"""Count reserved/in_flight nonces for failed/abandoned intents."""
|
|
2501
|
-
row = self.execute_one(
|
|
2502
|
-
"""
|
|
2503
|
-
SELECT COUNT(*) as count
|
|
2504
|
-
FROM nonce_reservations nr
|
|
2505
|
-
JOIN tx_intents ti ON nr.intent_id = ti.intent_id
|
|
2506
|
-
WHERE nr.chain_id = %s
|
|
2507
|
-
AND nr.status IN ('reserved', 'in_flight')
|
|
2508
|
-
AND ti.status IN ('failed', 'abandoned', 'reverted')
|
|
2509
|
-
""",
|
|
2510
|
-
(chain_id,),
|
|
2511
|
-
)
|
|
2512
|
-
return row["count"] if row else 0
|
|
2513
|
-
|
|
2514
|
-
def get_oldest_nonce_gap_age_seconds(self, chain_id: int) -> float:
|
|
2515
|
-
"""Get age in seconds of the oldest nonce gap.
|
|
2516
|
-
|
|
2517
|
-
Anchors from signers (small table) for efficiency.
|
|
2518
|
-
Returns 0 if no gaps or if chain nonce not synced.
|
|
2519
|
-
"""
|
|
2520
|
-
row = self.execute_one(
|
|
2521
|
-
"""
|
|
2522
|
-
SELECT
|
|
2523
|
-
COALESCE(EXTRACT(EPOCH FROM (NOW() - MIN(nr.created_at))), 0) AS oldest_gap_seconds
|
|
2524
|
-
FROM signers s
|
|
2525
|
-
JOIN nonce_reservations nr
|
|
2526
|
-
ON nr.chain_id = s.chain_id
|
|
2527
|
-
AND nr.signer_address = s.signer_address
|
|
2528
|
-
WHERE s.chain_id = %s
|
|
2529
|
-
AND s.last_synced_chain_nonce IS NOT NULL
|
|
2530
|
-
AND nr.status IN ('reserved', 'in_flight')
|
|
2531
|
-
AND nr.nonce < s.last_synced_chain_nonce
|
|
2532
|
-
""",
|
|
2533
|
-
(chain_id,),
|
|
2534
|
-
)
|
|
2535
|
-
return float(row["oldest_gap_seconds"]) if row else 0.0
|