brawny 0.1.13__py3-none-any.whl → 0.1.22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- brawny/__init__.py +2 -0
- brawny/_context.py +5 -5
- brawny/_rpc/__init__.py +36 -12
- brawny/_rpc/broadcast.py +14 -13
- brawny/_rpc/caller.py +243 -0
- brawny/_rpc/client.py +539 -0
- brawny/_rpc/clients.py +11 -11
- brawny/_rpc/context.py +23 -0
- brawny/_rpc/errors.py +465 -31
- brawny/_rpc/gas.py +7 -6
- brawny/_rpc/pool.py +18 -0
- brawny/_rpc/retry.py +266 -0
- brawny/_rpc/retry_policy.py +81 -0
- brawny/accounts.py +28 -9
- brawny/alerts/__init__.py +15 -18
- brawny/alerts/abi_resolver.py +212 -36
- brawny/alerts/base.py +2 -2
- brawny/alerts/contracts.py +77 -10
- brawny/alerts/errors.py +30 -3
- brawny/alerts/events.py +38 -5
- brawny/alerts/health.py +19 -13
- brawny/alerts/send.py +513 -55
- brawny/api.py +39 -11
- brawny/assets/AGENTS.md +325 -0
- brawny/async_runtime.py +48 -0
- brawny/chain.py +3 -3
- brawny/cli/commands/__init__.py +2 -0
- brawny/cli/commands/console.py +69 -19
- brawny/cli/commands/contract.py +2 -2
- brawny/cli/commands/controls.py +121 -0
- brawny/cli/commands/health.py +2 -2
- brawny/cli/commands/job_dev.py +6 -5
- brawny/cli/commands/jobs.py +99 -2
- brawny/cli/commands/maintenance.py +13 -29
- brawny/cli/commands/migrate.py +1 -0
- brawny/cli/commands/run.py +10 -3
- brawny/cli/commands/script.py +8 -3
- brawny/cli/commands/signer.py +143 -26
- brawny/cli/helpers.py +0 -3
- brawny/cli_templates.py +25 -349
- brawny/config/__init__.py +4 -1
- brawny/config/models.py +43 -57
- brawny/config/parser.py +268 -57
- brawny/config/validation.py +52 -15
- brawny/daemon/context.py +4 -2
- brawny/daemon/core.py +185 -63
- brawny/daemon/loops.py +166 -98
- brawny/daemon/supervisor.py +261 -0
- brawny/db/__init__.py +14 -26
- brawny/db/base.py +248 -151
- brawny/db/global_cache.py +11 -1
- brawny/db/migrate.py +175 -28
- brawny/db/migrations/001_init.sql +4 -3
- brawny/db/migrations/010_add_nonce_gap_index.sql +1 -1
- brawny/db/migrations/011_add_job_logs.sql +1 -2
- brawny/db/migrations/012_add_claimed_by.sql +2 -2
- brawny/db/migrations/013_attempt_unique.sql +10 -0
- brawny/db/migrations/014_add_lease_expires_at.sql +5 -0
- brawny/db/migrations/015_add_signer_alias.sql +14 -0
- brawny/db/migrations/016_runtime_controls_and_quarantine.sql +32 -0
- brawny/db/migrations/017_add_job_drain.sql +6 -0
- brawny/db/migrations/018_add_nonce_reset_audit.sql +20 -0
- brawny/db/migrations/019_add_job_cooldowns.sql +8 -0
- brawny/db/migrations/020_attempt_unique_initial.sql +7 -0
- brawny/db/ops/__init__.py +3 -25
- brawny/db/ops/logs.py +1 -2
- brawny/db/queries.py +47 -91
- brawny/db/serialized.py +65 -0
- brawny/db/sqlite/__init__.py +1001 -0
- brawny/db/sqlite/connection.py +231 -0
- brawny/db/sqlite/execute.py +116 -0
- brawny/db/sqlite/mappers.py +190 -0
- brawny/db/sqlite/repos/attempts.py +372 -0
- brawny/db/sqlite/repos/block_state.py +102 -0
- brawny/db/sqlite/repos/cache.py +104 -0
- brawny/db/sqlite/repos/intents.py +1021 -0
- brawny/db/sqlite/repos/jobs.py +200 -0
- brawny/db/sqlite/repos/maintenance.py +182 -0
- brawny/db/sqlite/repos/signers_nonces.py +566 -0
- brawny/db/sqlite/tx.py +119 -0
- brawny/http.py +194 -0
- brawny/invariants.py +11 -24
- brawny/jobs/base.py +8 -0
- brawny/jobs/job_validation.py +2 -1
- brawny/keystore.py +83 -7
- brawny/lifecycle.py +64 -12
- brawny/logging.py +0 -2
- brawny/metrics.py +84 -12
- brawny/model/contexts.py +111 -9
- brawny/model/enums.py +1 -0
- brawny/model/errors.py +18 -0
- brawny/model/types.py +47 -131
- brawny/network_guard.py +133 -0
- brawny/networks/__init__.py +5 -5
- brawny/networks/config.py +1 -7
- brawny/networks/manager.py +14 -11
- brawny/runtime_controls.py +74 -0
- brawny/scheduler/poller.py +11 -7
- brawny/scheduler/reorg.py +95 -39
- brawny/scheduler/runner.py +442 -168
- brawny/scheduler/shutdown.py +3 -3
- brawny/script_tx.py +3 -3
- brawny/telegram.py +53 -7
- brawny/testing.py +1 -0
- brawny/timeout.py +38 -0
- brawny/tx/executor.py +922 -308
- brawny/tx/intent.py +54 -16
- brawny/tx/monitor.py +31 -12
- brawny/tx/nonce.py +212 -90
- brawny/tx/replacement.py +69 -18
- brawny/tx/retry_policy.py +24 -0
- brawny/tx/stages/types.py +75 -0
- brawny/types.py +18 -0
- brawny/utils.py +41 -0
- {brawny-0.1.13.dist-info → brawny-0.1.22.dist-info}/METADATA +3 -3
- brawny-0.1.22.dist-info/RECORD +163 -0
- brawny/_rpc/manager.py +0 -982
- brawny/_rpc/selector.py +0 -156
- brawny/db/base_new.py +0 -165
- brawny/db/mappers.py +0 -182
- brawny/db/migrations/008_add_transactions.sql +0 -72
- brawny/db/ops/attempts.py +0 -108
- brawny/db/ops/blocks.py +0 -83
- brawny/db/ops/cache.py +0 -93
- brawny/db/ops/intents.py +0 -296
- brawny/db/ops/jobs.py +0 -110
- brawny/db/ops/nonces.py +0 -322
- brawny/db/postgres.py +0 -2535
- brawny/db/postgres_new.py +0 -196
- brawny/db/sqlite.py +0 -2733
- brawny/db/sqlite_new.py +0 -191
- brawny-0.1.13.dist-info/RECORD +0 -141
- {brawny-0.1.13.dist-info → brawny-0.1.22.dist-info}/WHEEL +0 -0
- {brawny-0.1.13.dist-info → brawny-0.1.22.dist-info}/entry_points.txt +0 -0
- {brawny-0.1.13.dist-info → brawny-0.1.22.dist-info}/top_level.txt +0 -0
brawny/db/migrate.py
CHANGED

@@ -1,7 +1,4 @@
-"""Database migration management for brawny.
-
-Handles schema migrations for both PostgreSQL and SQLite.
-"""
+"""Database migration management for brawny (SQLite-only)."""
 
 from __future__ import annotations
 
@@ -12,12 +9,14 @@ from pathlib import Path
 from typing import TYPE_CHECKING
 
 from brawny.model.errors import DatabaseError
+from brawny.logging import get_logger
 
 if TYPE_CHECKING:
     from brawny.db.base import Database
 
 
 MIGRATIONS_DIR = Path(__file__).parent / "migrations"
+logger = get_logger(__name__)
 
 
 @dataclass
@@ -85,7 +84,6 @@ def get_applied_migrations(db: Database) -> set[str]:
         Set of version strings that have been applied
     """
     try:
-        # Check if migrations table exists
         result = db.execute_returning(
             """
             SELECT version, applied_at
@@ -94,9 +92,21 @@ def get_applied_migrations(db: Database) -> set[str]:
             """
         )
         return {row["version"] for row in result}
-    except Exception:
-
-
+    except Exception as e:
+        db_info = _db_context(db)
+        if _is_missing_schema_migrations(e):
+            logger.warning(
+                "migrations.schema_missing",
+                db=db_info,
+                error=str(e)[:200],
+            )
+            return set()
+        logger.warning(
+            "migrations.schema_read_failed",
+            db=db_info,
+            error=str(e)[:200],
+        )
+        raise DatabaseError("Failed to read schema_migrations table") from e
 
 
 def get_pending_migrations(db: Database) -> list[Migration]:
@@ -134,8 +144,21 @@ def get_migration_status(db: Database) -> list[Migration]:
         )
         for row in result:
             applied[row["version"]] = row["applied_at"]
-    except Exception:
-
+    except Exception as e:
+        db_info = _db_context(db)
+        if _is_missing_schema_migrations(e):
+            logger.warning(
+                "migrations.schema_missing",
+                db=db_info,
+                error=str(e)[:200],
+            )
+        else:
+            logger.warning(
+                "migrations.schema_read_failed",
+                db=db_info,
+                error=str(e)[:200],
+            )
+            raise DatabaseError("Failed to read schema_migrations table") from e
 
     all_migrations = discover_migrations()
     for migration in all_migrations:
@@ -156,7 +179,7 @@ def run_migration(db: Database, migration: Migration) -> None:
         DatabaseError: If migration fails
     """
     try:
-        if
+        if migration.version == "012":
             with db.transaction():
                 existing = {
                     r["name"].lower()
@@ -180,6 +203,30 @@ def run_migration(db: Database, migration: Migration) -> None:
                     (migration.version,),
                 )
             return
+        if migration.version == "014":
+            with db.transaction():
+                existing = {
+                    r["name"].lower()
+                    for r in db.execute_returning("PRAGMA table_info(tx_intents)")
+                }
+                if "lease_expires_at" in existing:
+                    db.execute(
+                        "INSERT OR IGNORE INTO schema_migrations (version) VALUES (?)",
+                        (migration.version,),
+                    )
+                    return
+                try:
+                    db.execute(
+                        "ALTER TABLE tx_intents ADD COLUMN lease_expires_at TIMESTAMP"
+                    )
+                except Exception as exc:
+                    if "duplicate column name" not in str(exc).lower():
+                        raise
+                db.execute(
+                    "INSERT OR IGNORE INTO schema_migrations (version) VALUES (?)",
+                    (migration.version,),
+                )
+                return
 
         with db.transaction():
             # Split SQL into individual statements for SQLite compatibility
@@ -263,6 +310,19 @@ def _split_sql_statements(sql: str) -> list[str]:
     return statements
 
 
+def _is_missing_schema_migrations(error: Exception) -> bool:
+    message = str(error).lower()
+    return "no such table" in message and "schema_migrations" in message
+
+
+def _db_context(db: Database) -> dict[str, str]:
+    info: dict[str, str] = {"dialect": getattr(db, "dialect", "unknown")}
+    db_path = getattr(db, "_database_path", None)
+    if isinstance(db_path, str):
+        info["path"] = db_path
+    return info
+
+
 def run_pending_migrations(db: Database) -> list[Migration]:
     """Run all pending migrations.
 
@@ -281,32 +341,119 @@ def run_pending_migrations(db: Database) -> list[Migration]:
         run_migration(db, migration)
         migration.applied_at = datetime.utcnow()
 
-    verify_critical_schema(db)
-
     return pending
 
 
 def verify_critical_schema(db: Database) -> None:
     """Hard-fail if critical columns missing. Runs for daemon + CLI."""
-    #
-
-
-
-
+    # Keep the list short and hot-path only.
+    required_by_table: dict[str, set[str]] = {
+        "tx_intents": {
+            "intent_id",
+            "status",
+            "claim_token",
+            "claimed_at",
+            "claimed_by",
+            "lease_expires_at",
+        },
+        "tx_attempts": {
+            "attempt_id",
+            "intent_id",
+            "status",
+            "created_at",
+        },
+        "nonce_reservations": {
+            "id",
+            "chain_id",
+            "signer_address",
+            "nonce",
+            "status",
+            "intent_id",
+            "created_at",
+            "updated_at",
+        },
+    }
+
+    missing_by_table: dict[str, set[str]] = {}
+    for table_name, required in required_by_table.items():
+        rows = db.execute_returning(f"PRAGMA table_info({table_name})")
         existing = {r["name"].lower() for r in rows}
-
+        missing = {col for col in required if col.lower() not in existing}
+        if missing:
+            missing_by_table[table_name] = missing
+
+    if not missing_by_table:
+        return
+
+    expected_version = _latest_migration_version()
+    current_version, current_version_note = _read_schema_version(db)
+    remediation = _schema_remediation(expected_version, current_version, current_version_note)
+
+    missing_details = "; ".join(
+        f"{table}: {', '.join(sorted(cols))}"
+        for table, cols in sorted(missing_by_table.items())
+    )
+    message_parts = [
+        f"Missing critical columns: {missing_details}.",
+    ]
+    if expected_version:
+        message_parts.append(f"Expected schema version: {expected_version}.")
+    if current_version:
+        message_parts.append(f"Current schema version: {current_version}.")
+    elif current_version_note:
+        message_parts.append(f"Current schema version: unknown ({current_version_note}).")
+    message_parts.append(f"Remediation: {remediation}.")
+    raise RuntimeError(" ".join(message_parts))
+
+
+def _latest_migration_version() -> str | None:
+    migrations = discover_migrations()
+    if not migrations:
+        return None
+    return max(migrations, key=lambda m: int(m.version)).version
+
+
+def _read_schema_version(db: Database) -> tuple[str | None, str | None]:
+    try:
         rows = db.execute_returning(
-            "SELECT
-            "WHERE table_schema = current_schema() AND table_name = 'tx_intents'"
+            "SELECT version FROM schema_migrations ORDER BY version DESC LIMIT 1"
         )
-
-
-
-
-
-
-
+    except Exception as exc:
+        if _is_missing_schema_migrations(exc):
+            return None, "schema_migrations missing"
+        logger.warning(
+            "migrations.schema_read_failed",
+            db=_db_context(db),
+            error=str(exc)[:200],
         )
+        return None, "schema_migrations unreadable"
+
+    if not rows:
+        return None, "schema_migrations empty"
+    return str(rows[0]["version"]), None
+
+
+def _schema_remediation(
+    expected_version: str | None,
+    current_version: str | None,
+    current_version_note: str | None,
+) -> str:
+    if current_version_note is not None:
+        return "DB appears out-of-band; delete/re-init or run a repair script if available"
+    if expected_version and current_version:
+        expected_int = _parse_version_int(expected_version)
+        current_int = _parse_version_int(current_version)
+        if expected_int is not None and current_int is not None:
+            if current_int < expected_int:
+                return "run `brawny db migrate`"
+    return "DB appears out-of-band; delete/re-init or run a repair script if available"
+
+
+def _parse_version_int(version: str) -> int | None:
+    try:
+        return int(version)
+    except ValueError:
+        return None
 
 
 class Migrator:
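
The version-specific branches above ("012", "014") exist because SQLite has no ALTER TABLE ... ADD COLUMN IF NOT EXISTS, so the migrator probes PRAGMA table_info and tolerates "duplicate column name" errors instead. A minimal standalone sketch of that guard pattern, using the standard sqlite3 module rather than brawny's Database wrapper (the helper name and demo table are illustrative only):

    import sqlite3

    def add_column_if_missing(conn: sqlite3.Connection, table: str, column: str, ddl: str) -> None:
        # PRAGMA table_info returns one row per existing column; the name is field 1.
        existing = {row[1].lower() for row in conn.execute(f"PRAGMA table_info({table})")}
        if column.lower() in existing:
            return
        try:
            conn.execute(f"ALTER TABLE {table} ADD COLUMN {column} {ddl}")
        except sqlite3.OperationalError as exc:
            # Tolerate a racing writer that added the column between the probe and the ALTER.
            if "duplicate column name" not in str(exc).lower():
                raise

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE tx_intents (intent_id TEXT PRIMARY KEY)")
    add_column_if_missing(conn, "tx_intents", "lease_expires_at", "TIMESTAMP")
    add_column_if_missing(conn, "tx_intents", "lease_expires_at", "TIMESTAMP")  # second call is a no-op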

brawny/db/migrations/001_init.sql
CHANGED

@@ -25,7 +25,7 @@ CREATE TABLE IF NOT EXISTS block_state (
 -- 2. Block Hash History - for reorg detection
 -- ============================================================================
 CREATE TABLE IF NOT EXISTS block_hash_history (
-    id
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
     chain_id INTEGER NOT NULL,
     block_number BIGINT NOT NULL,
     block_hash VARCHAR(66) NOT NULL,
@@ -81,7 +81,7 @@ CREATE TABLE IF NOT EXISTS signers (
 -- 6. Nonce Reservations - tracks nonce allocation and status
 -- ============================================================================
 CREATE TABLE IF NOT EXISTS nonce_reservations (
-    id
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
     chain_id INTEGER NOT NULL,
     signer_address VARCHAR(42) NOT NULL,
     nonce BIGINT NOT NULL,
@@ -118,6 +118,7 @@ CREATE TABLE IF NOT EXISTS tx_intents (
     claim_token VARCHAR(100),
     claimed_at TIMESTAMP,
     claimed_by VARCHAR(200),
+    lease_expires_at TIMESTAMP,
     created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
     updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
 );
@@ -141,7 +142,7 @@ CREATE TABLE IF NOT EXISTS tx_attempts (
     tx_hash VARCHAR(66),
     gas_params_json TEXT NOT NULL,
     status VARCHAR(20) NOT NULL DEFAULT 'signed' CHECK (
-        status IN ('signed', 'broadcast', 'pending', 'confirmed', 'failed', 'replaced')
+        status IN ('signed', 'pending_send', 'broadcast', 'pending', 'confirmed', 'failed', 'replaced')
     ),
     error_code VARCHAR(100),
     error_detail TEXT,

brawny/db/migrations/010_add_nonce_gap_index.sql
CHANGED

@@ -1,7 +1,7 @@
 -- Add composite index for efficient nonce gap age queries
 -- Makes the MIN(created_at) scan an index walk instead of heap scan
 -- Version: 010
--- Note: CONCURRENTLY removed for SQLite compatibility
+-- Note: CONCURRENTLY removed for SQLite compatibility
 
 CREATE INDEX IF NOT EXISTS idx_nonce_res_chain_signer_status_nonce_created
 ON nonce_reservations (chain_id, signer_address, status, nonce, created_at);

brawny/db/migrations/011_add_job_logs.sql
CHANGED

@@ -1,9 +1,8 @@
 -- Job logs for operator-visible snapshots during check()
 -- Version: 011
--- Note: SERIAL becomes INTEGER on SQLite (handled by sqlite.py adapter)
 
 CREATE TABLE IF NOT EXISTS job_logs (
-    id
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
     chain_id INTEGER NOT NULL,
     job_id VARCHAR(200) NOT NULL,
     block_number BIGINT,

brawny/db/migrations/012_add_claimed_by.sql
CHANGED

@@ -1,5 +1,5 @@
---
-ALTER TABLE tx_intents ADD COLUMN
+-- SQLite handled in migrator (idempotent)
+ALTER TABLE tx_intents ADD COLUMN claimed_by VARCHAR(200);
 
 INSERT INTO schema_migrations (version) VALUES ('012')
 ON CONFLICT (version) DO NOTHING;

brawny/db/migrations/013_attempt_unique.sql
ADDED

@@ -0,0 +1,10 @@
+-- Add binding identifier for attempts and enforce uniqueness per binding
+
+ALTER TABLE tx_intents ADD COLUMN broadcast_binding_id UUID;
+ALTER TABLE tx_attempts ADD COLUMN endpoint_binding_id UUID;
+
+CREATE UNIQUE INDEX IF NOT EXISTS uq_tx_attempts_intent_nonce_binding_hash
+ON tx_attempts(intent_id, nonce, endpoint_binding_id, tx_hash)
+WHERE tx_hash IS NOT NULL AND endpoint_binding_id IS NOT NULL;
+
+INSERT INTO schema_migrations (version) VALUES ('013');
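
The unique index in 013 is partial: it only constrains rows that already have both a tx_hash and an endpoint_binding_id, so unsent or unbound attempts never collide. A rough sqlite3 illustration of that behavior, with the table trimmed to just the indexed columns (not brawny's real schema):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.executescript("""
        CREATE TABLE tx_attempts (
            intent_id TEXT NOT NULL,
            nonce INTEGER NOT NULL,
            endpoint_binding_id TEXT,
            tx_hash TEXT
        );
        CREATE UNIQUE INDEX uq_tx_attempts_intent_nonce_binding_hash
        ON tx_attempts(intent_id, nonce, endpoint_binding_id, tx_hash)
        WHERE tx_hash IS NOT NULL AND endpoint_binding_id IS NOT NULL;
    """)

    # Rows outside the WHERE predicate (no tx_hash yet) can repeat freely.
    conn.execute("INSERT INTO tx_attempts VALUES ('intent-1', 7, 'ep-a', NULL)")
    conn.execute("INSERT INTO tx_attempts VALUES ('intent-1', 7, 'ep-a', NULL)")

    # Once both values are set, the same (intent, nonce, binding, hash) tuple is rejected.
    conn.execute("INSERT INTO tx_attempts VALUES ('intent-1', 7, 'ep-a', '0xabc')")
    try:
        conn.execute("INSERT INTO tx_attempts VALUES ('intent-1', 7, 'ep-a', '0xabc')")
    except sqlite3.IntegrityError as exc:
        print("duplicate broadcast attempt rejected:", exc)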

brawny/db/migrations/015_add_signer_alias.sql
ADDED

@@ -0,0 +1,14 @@
+-- Add signer_alias columns for intent/transaction auditing
+-- Note: SQLite does not support "IF NOT EXISTS" for ALTER TABLE ADD COLUMN
+
+ALTER TABLE tx_intents ADD COLUMN signer_alias VARCHAR(200);
+
+-- Normalize existing address data to lowercase canonical form.
+UPDATE signers SET signer_address = lower(signer_address);
+UPDATE nonce_reservations SET signer_address = lower(signer_address);
+UPDATE tx_intents SET signer_address = lower(signer_address), to_address = lower(to_address);
+UPDATE abi_cache SET address = lower(address);
+UPDATE proxy_cache SET proxy_address = lower(proxy_address),
+    implementation_address = lower(implementation_address);
+
+INSERT INTO schema_migrations (version) VALUES ('015');

brawny/db/migrations/016_runtime_controls_and_quarantine.sql
ADDED

@@ -0,0 +1,32 @@
+-- Description: Add signer quarantine, runtime controls, and mutation audit
+
+-- Signer containment
+ALTER TABLE signers ADD COLUMN quarantined_at TIMESTAMP;
+ALTER TABLE signers ADD COLUMN quarantine_reason TEXT;
+ALTER TABLE signers ADD COLUMN replacements_paused INTEGER NOT NULL DEFAULT 0;
+
+-- Runtime controls (containment with TTL)
+CREATE TABLE IF NOT EXISTS runtime_controls (
+    control VARCHAR(100) PRIMARY KEY,
+    active INTEGER NOT NULL DEFAULT 0,
+    expires_at TIMESTAMP,
+    reason TEXT,
+    actor TEXT,
+    mode VARCHAR(20) NOT NULL DEFAULT 'auto',
+    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
+);
+
+-- Durable mutation audit (minimal, append-only)
+CREATE TABLE IF NOT EXISTS mutation_audit (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    entity_type VARCHAR(50) NOT NULL,
+    entity_id TEXT NOT NULL,
+    action VARCHAR(100) NOT NULL,
+    actor TEXT,
+    reason TEXT,
+    source TEXT,
+    metadata_json TEXT,
+    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
+);
+
+INSERT INTO schema_migrations (version) VALUES ('016');
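
runtime_controls pairs an active flag with an optional expires_at, which implies a read path that treats an expired control as inactive. A hypothetical check along those lines (the function and query are assumptions for illustration, not brawny's actual API; timestamps are assumed to be the UTC values written by CURRENT_TIMESTAMP):

    import sqlite3
    from datetime import datetime, timezone

    def control_is_active(conn: sqlite3.Connection, control: str) -> bool:
        row = conn.execute(
            "SELECT active, expires_at FROM runtime_controls WHERE control = ?",
            (control,),
        ).fetchone()
        if row is None or not row[0]:
            return False
        if row[1] is None:
            return True  # no TTL: active until explicitly cleared
        expires = datetime.fromisoformat(row[1]).replace(tzinfo=timezone.utc)
        return datetime.now(timezone.utc) < expires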

brawny/db/migrations/018_add_nonce_reset_audit.sql
ADDED

@@ -0,0 +1,20 @@
+-- Audit table for nonce force resets
+-- Provides durable record of destructive nonce operations for incident investigation
+
+CREATE TABLE IF NOT EXISTS nonce_reset_audit (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    chain_id INTEGER NOT NULL,
+    signer_address TEXT NOT NULL,
+    old_next_nonce INTEGER,
+    new_next_nonce INTEGER NOT NULL,
+    released_reservations INTEGER NOT NULL DEFAULT 0,
+    source TEXT NOT NULL, -- 'cli', 'executor', 'api'
+    reason TEXT,
+    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
+
+CREATE INDEX IF NOT EXISTS idx_nonce_reset_audit_signer
+ON nonce_reset_audit(chain_id, signer_address);
+
+CREATE INDEX IF NOT EXISTS idx_nonce_reset_audit_created
+ON nonce_reset_audit(created_at);
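
Because nonce_reset_audit is append-only, the natural use is to write the audit row in the same transaction that performs the destructive reset. A hypothetical helper showing that write (the function is not part of brawny; the column list and the 'cli' source tag follow the migration's comments):

    import sqlite3

    def record_nonce_reset(
        conn: sqlite3.Connection,
        chain_id: int,
        signer_address: str,
        old_next_nonce: int | None,
        new_next_nonce: int,
        released_reservations: int,
        reason: str | None,
    ) -> None:
        conn.execute(
            """
            INSERT INTO nonce_reset_audit
                (chain_id, signer_address, old_next_nonce, new_next_nonce,
                 released_reservations, source, reason)
            VALUES (?, ?, ?, ?, ?, 'cli', ?)
            """,
            (chain_id, signer_address.lower(), old_next_nonce,
             new_next_nonce, released_reservations, reason),
        )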

brawny/db/migrations/020_attempt_unique_initial.sql
ADDED

@@ -0,0 +1,7 @@
+-- Enforce single initial attempt per intent+nonce (replacements allowed)
+
+CREATE UNIQUE INDEX IF NOT EXISTS uq_tx_attempts_intent_nonce_initial
+ON tx_attempts(intent_id, nonce)
+WHERE replaces_attempt_id IS NULL;
+
+INSERT INTO schema_migrations (version) VALUES ('020');

brawny/db/ops/__init__.py
CHANGED

@@ -1,29 +1,7 @@
-"""Database operations modules.
-
-Flat function-based operations for each domain:
-- blocks: Block state and hash history
-- jobs: Job configuration and KV store
-- intents: Transaction intents
-- attempts: Transaction attempts
-- nonces: Signer state and nonce reservations
-- cache: ABI and proxy cache
-
-Usage:
-    from brawny.db import ops
-
-    # Use functions from specific modules
-    state = ops.blocks.get_block_state(db, chain_id)
-    job = ops.jobs.get_job(db, job_id)
-    intent = ops.intents.create_intent(db, ...)
-"""
+"""Database operations modules."""
 
 from __future__ import annotations
 
-from brawny.db.ops import
-from brawny.db.ops import jobs
-from brawny.db.ops import intents
-from brawny.db.ops import attempts
-from brawny.db.ops import nonces
-from brawny.db.ops import cache
+from brawny.db.ops import logs
 
-__all__ = ["
+__all__ = ["logs"]

brawny/db/ops/logs.py
CHANGED

@@ -71,8 +71,7 @@ def list_all_logs(
 
 def list_latest_logs(db: "Database", chain_id: int) -> list[dict[str, Any]]:
     """Get the most recent log entry per job."""
-
-    rows = db.execute_returning(query, {"chain_id": chain_id})
+    rows = db.execute_returning(Q.LIST_LATEST_JOB_LOGS, {"chain_id": chain_id})
     return [_row_to_log(row) for row in rows]
 
 