brawny 0.1.13__py3-none-any.whl → 0.1.22__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (135)
  1. brawny/__init__.py +2 -0
  2. brawny/_context.py +5 -5
  3. brawny/_rpc/__init__.py +36 -12
  4. brawny/_rpc/broadcast.py +14 -13
  5. brawny/_rpc/caller.py +243 -0
  6. brawny/_rpc/client.py +539 -0
  7. brawny/_rpc/clients.py +11 -11
  8. brawny/_rpc/context.py +23 -0
  9. brawny/_rpc/errors.py +465 -31
  10. brawny/_rpc/gas.py +7 -6
  11. brawny/_rpc/pool.py +18 -0
  12. brawny/_rpc/retry.py +266 -0
  13. brawny/_rpc/retry_policy.py +81 -0
  14. brawny/accounts.py +28 -9
  15. brawny/alerts/__init__.py +15 -18
  16. brawny/alerts/abi_resolver.py +212 -36
  17. brawny/alerts/base.py +2 -2
  18. brawny/alerts/contracts.py +77 -10
  19. brawny/alerts/errors.py +30 -3
  20. brawny/alerts/events.py +38 -5
  21. brawny/alerts/health.py +19 -13
  22. brawny/alerts/send.py +513 -55
  23. brawny/api.py +39 -11
  24. brawny/assets/AGENTS.md +325 -0
  25. brawny/async_runtime.py +48 -0
  26. brawny/chain.py +3 -3
  27. brawny/cli/commands/__init__.py +2 -0
  28. brawny/cli/commands/console.py +69 -19
  29. brawny/cli/commands/contract.py +2 -2
  30. brawny/cli/commands/controls.py +121 -0
  31. brawny/cli/commands/health.py +2 -2
  32. brawny/cli/commands/job_dev.py +6 -5
  33. brawny/cli/commands/jobs.py +99 -2
  34. brawny/cli/commands/maintenance.py +13 -29
  35. brawny/cli/commands/migrate.py +1 -0
  36. brawny/cli/commands/run.py +10 -3
  37. brawny/cli/commands/script.py +8 -3
  38. brawny/cli/commands/signer.py +143 -26
  39. brawny/cli/helpers.py +0 -3
  40. brawny/cli_templates.py +25 -349
  41. brawny/config/__init__.py +4 -1
  42. brawny/config/models.py +43 -57
  43. brawny/config/parser.py +268 -57
  44. brawny/config/validation.py +52 -15
  45. brawny/daemon/context.py +4 -2
  46. brawny/daemon/core.py +185 -63
  47. brawny/daemon/loops.py +166 -98
  48. brawny/daemon/supervisor.py +261 -0
  49. brawny/db/__init__.py +14 -26
  50. brawny/db/base.py +248 -151
  51. brawny/db/global_cache.py +11 -1
  52. brawny/db/migrate.py +175 -28
  53. brawny/db/migrations/001_init.sql +4 -3
  54. brawny/db/migrations/010_add_nonce_gap_index.sql +1 -1
  55. brawny/db/migrations/011_add_job_logs.sql +1 -2
  56. brawny/db/migrations/012_add_claimed_by.sql +2 -2
  57. brawny/db/migrations/013_attempt_unique.sql +10 -0
  58. brawny/db/migrations/014_add_lease_expires_at.sql +5 -0
  59. brawny/db/migrations/015_add_signer_alias.sql +14 -0
  60. brawny/db/migrations/016_runtime_controls_and_quarantine.sql +32 -0
  61. brawny/db/migrations/017_add_job_drain.sql +6 -0
  62. brawny/db/migrations/018_add_nonce_reset_audit.sql +20 -0
  63. brawny/db/migrations/019_add_job_cooldowns.sql +8 -0
  64. brawny/db/migrations/020_attempt_unique_initial.sql +7 -0
  65. brawny/db/ops/__init__.py +3 -25
  66. brawny/db/ops/logs.py +1 -2
  67. brawny/db/queries.py +47 -91
  68. brawny/db/serialized.py +65 -0
  69. brawny/db/sqlite/__init__.py +1001 -0
  70. brawny/db/sqlite/connection.py +231 -0
  71. brawny/db/sqlite/execute.py +116 -0
  72. brawny/db/sqlite/mappers.py +190 -0
  73. brawny/db/sqlite/repos/attempts.py +372 -0
  74. brawny/db/sqlite/repos/block_state.py +102 -0
  75. brawny/db/sqlite/repos/cache.py +104 -0
  76. brawny/db/sqlite/repos/intents.py +1021 -0
  77. brawny/db/sqlite/repos/jobs.py +200 -0
  78. brawny/db/sqlite/repos/maintenance.py +182 -0
  79. brawny/db/sqlite/repos/signers_nonces.py +566 -0
  80. brawny/db/sqlite/tx.py +119 -0
  81. brawny/http.py +194 -0
  82. brawny/invariants.py +11 -24
  83. brawny/jobs/base.py +8 -0
  84. brawny/jobs/job_validation.py +2 -1
  85. brawny/keystore.py +83 -7
  86. brawny/lifecycle.py +64 -12
  87. brawny/logging.py +0 -2
  88. brawny/metrics.py +84 -12
  89. brawny/model/contexts.py +111 -9
  90. brawny/model/enums.py +1 -0
  91. brawny/model/errors.py +18 -0
  92. brawny/model/types.py +47 -131
  93. brawny/network_guard.py +133 -0
  94. brawny/networks/__init__.py +5 -5
  95. brawny/networks/config.py +1 -7
  96. brawny/networks/manager.py +14 -11
  97. brawny/runtime_controls.py +74 -0
  98. brawny/scheduler/poller.py +11 -7
  99. brawny/scheduler/reorg.py +95 -39
  100. brawny/scheduler/runner.py +442 -168
  101. brawny/scheduler/shutdown.py +3 -3
  102. brawny/script_tx.py +3 -3
  103. brawny/telegram.py +53 -7
  104. brawny/testing.py +1 -0
  105. brawny/timeout.py +38 -0
  106. brawny/tx/executor.py +922 -308
  107. brawny/tx/intent.py +54 -16
  108. brawny/tx/monitor.py +31 -12
  109. brawny/tx/nonce.py +212 -90
  110. brawny/tx/replacement.py +69 -18
  111. brawny/tx/retry_policy.py +24 -0
  112. brawny/tx/stages/types.py +75 -0
  113. brawny/types.py +18 -0
  114. brawny/utils.py +41 -0
  115. {brawny-0.1.13.dist-info → brawny-0.1.22.dist-info}/METADATA +3 -3
  116. brawny-0.1.22.dist-info/RECORD +163 -0
  117. brawny/_rpc/manager.py +0 -982
  118. brawny/_rpc/selector.py +0 -156
  119. brawny/db/base_new.py +0 -165
  120. brawny/db/mappers.py +0 -182
  121. brawny/db/migrations/008_add_transactions.sql +0 -72
  122. brawny/db/ops/attempts.py +0 -108
  123. brawny/db/ops/blocks.py +0 -83
  124. brawny/db/ops/cache.py +0 -93
  125. brawny/db/ops/intents.py +0 -296
  126. brawny/db/ops/jobs.py +0 -110
  127. brawny/db/ops/nonces.py +0 -322
  128. brawny/db/postgres.py +0 -2535
  129. brawny/db/postgres_new.py +0 -196
  130. brawny/db/sqlite.py +0 -2733
  131. brawny/db/sqlite_new.py +0 -191
  132. brawny-0.1.13.dist-info/RECORD +0 -141
  133. {brawny-0.1.13.dist-info → brawny-0.1.22.dist-info}/WHEEL +0 -0
  134. {brawny-0.1.13.dist-info → brawny-0.1.22.dist-info}/entry_points.txt +0 -0
  135. {brawny-0.1.13.dist-info → brawny-0.1.22.dist-info}/top_level.txt +0 -0
brawny/db/migrate.py CHANGED
@@ -1,7 +1,4 @@
- """Database migration management for brawny.
- 
- Handles schema migrations for both PostgreSQL and SQLite.
- """
+ """Database migration management for brawny (SQLite-only)."""
 
  from __future__ import annotations
 
@@ -12,12 +9,14 @@ from pathlib import Path
  from typing import TYPE_CHECKING
 
  from brawny.model.errors import DatabaseError
+ from brawny.logging import get_logger
 
  if TYPE_CHECKING:
      from brawny.db.base import Database
 
 
  MIGRATIONS_DIR = Path(__file__).parent / "migrations"
+ logger = get_logger(__name__)
 
 
  @dataclass
@@ -85,7 +84,6 @@ def get_applied_migrations(db: Database) -> set[str]:
          Set of version strings that have been applied
      """
      try:
-         # Check if migrations table exists
          result = db.execute_returning(
              """
              SELECT version, applied_at
@@ -94,9 +92,21 @@ def get_applied_migrations(db: Database) -> set[str]:
              """
          )
          return {row["version"] for row in result}
-     except Exception:
-         # Table doesn't exist yet
-         return set()
+     except Exception as e:
+         db_info = _db_context(db)
+         if _is_missing_schema_migrations(e):
+             logger.warning(
+                 "migrations.schema_missing",
+                 db=db_info,
+                 error=str(e)[:200],
+             )
+             return set()
+         logger.warning(
+             "migrations.schema_read_failed",
+             db=db_info,
+             error=str(e)[:200],
+         )
+         raise DatabaseError("Failed to read schema_migrations table") from e
 
 
  def get_pending_migrations(db: Database) -> list[Migration]:
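The rewritten except branch above (and the matching one in get_migration_status below) distinguishes a missing schema_migrations table from other read failures by matching SQLite's error text. A minimal sketch of the message being matched, using the stdlib sqlite3 module directly rather than brawny's Database wrapper:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    try:
        conn.execute("SELECT version FROM schema_migrations")
    except sqlite3.OperationalError as e:
        # SQLite reports: "no such table: schema_migrations"
        message = str(e).lower()
        print("no such table" in message and "schema_migrations" in message)  # True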
@@ -134,8 +144,21 @@ def get_migration_status(db: Database) -> list[Migration]:
          )
          for row in result:
              applied[row["version"]] = row["applied_at"]
-     except Exception:
-         pass
+     except Exception as e:
+         db_info = _db_context(db)
+         if _is_missing_schema_migrations(e):
+             logger.warning(
+                 "migrations.schema_missing",
+                 db=db_info,
+                 error=str(e)[:200],
+             )
+         else:
+             logger.warning(
+                 "migrations.schema_read_failed",
+                 db=db_info,
+                 error=str(e)[:200],
+             )
+             raise DatabaseError("Failed to read schema_migrations table") from e
 
      all_migrations = discover_migrations()
      for migration in all_migrations:
@@ -156,7 +179,7 @@ def run_migration(db: Database, migration: Migration) -> None:
          DatabaseError: If migration fails
      """
      try:
-         if db.dialect == "sqlite" and migration.version == "012":
+         if migration.version == "012":
              with db.transaction():
                  existing = {
                      r["name"].lower()
@@ -180,6 +203,30 @@ def run_migration(db: Database, migration: Migration) -> None:
                      (migration.version,),
                  )
                  return
+         if migration.version == "014":
+             with db.transaction():
+                 existing = {
+                     r["name"].lower()
+                     for r in db.execute_returning("PRAGMA table_info(tx_intents)")
+                 }
+                 if "lease_expires_at" in existing:
+                     db.execute(
+                         "INSERT OR IGNORE INTO schema_migrations (version) VALUES (?)",
+                         (migration.version,),
+                     )
+                     return
+                 try:
+                     db.execute(
+                         "ALTER TABLE tx_intents ADD COLUMN lease_expires_at TIMESTAMP"
+                     )
+                 except Exception as exc:
+                     if "duplicate column name" not in str(exc).lower():
+                         raise
+                 db.execute(
+                     "INSERT OR IGNORE INTO schema_migrations (version) VALUES (?)",
+                     (migration.version,),
+                 )
+                 return
 
          with db.transaction():
              # Split SQL into individual statements for SQLite compatibility
@@ -263,6 +310,19 @@ def _split_sql_statements(sql: str) -> list[str]:
      return statements
 
 
+ def _is_missing_schema_migrations(error: Exception) -> bool:
+     message = str(error).lower()
+     return "no such table" in message and "schema_migrations" in message
+ 
+ 
+ def _db_context(db: Database) -> dict[str, str]:
+     info: dict[str, str] = {"dialect": getattr(db, "dialect", "unknown")}
+     db_path = getattr(db, "_database_path", None)
+     if isinstance(db_path, str):
+         info["path"] = db_path
+     return info
+ 
+ 
  def run_pending_migrations(db: Database) -> list[Migration]:
      """Run all pending migrations.
 
@@ -281,32 +341,119 @@ def run_pending_migrations(db: Database) -> list[Migration]:
          run_migration(db, migration)
          migration.applied_at = datetime.utcnow()
 
-     verify_critical_schema(db)
- 
      return pending
 
 
  def verify_critical_schema(db: Database) -> None:
      """Hard-fail if critical columns missing. Runs for daemon + CLI."""
-     # If earlier schemas lacked any of these, reduce to the minimum needed for safe operation.
-     required = {"intent_id", "status", "claim_token", "claimed_at", "claimed_by"}
- 
-     if db.dialect == "sqlite":
-         rows = db.execute_returning("PRAGMA table_info(tx_intents)")
+     # Keep the list short and hot-path only.
+     required_by_table: dict[str, set[str]] = {
+         "tx_intents": {
+             "intent_id",
+             "status",
+             "claim_token",
+             "claimed_at",
+             "claimed_by",
+             "lease_expires_at",
+         },
+         "tx_attempts": {
+             "attempt_id",
+             "intent_id",
+             "status",
+             "created_at",
+         },
+         "nonce_reservations": {
+             "id",
+             "chain_id",
+             "signer_address",
+             "nonce",
+             "status",
+             "intent_id",
+             "created_at",
+             "updated_at",
+         },
+     }
+ 
+     missing_by_table: dict[str, set[str]] = {}
+     for table_name, required in required_by_table.items():
+         rows = db.execute_returning(f"PRAGMA table_info({table_name})")
          existing = {r["name"].lower() for r in rows}
-     else:
+         missing = {col for col in required if col.lower() not in existing}
+         if missing:
+             missing_by_table[table_name] = missing
+ 
+     if not missing_by_table:
+         return
+ 
+     expected_version = _latest_migration_version()
+     current_version, current_version_note = _read_schema_version(db)
+     remediation = _schema_remediation(expected_version, current_version, current_version_note)
+ 
+     missing_details = "; ".join(
+         f"{table}: {', '.join(sorted(cols))}"
+         for table, cols in sorted(missing_by_table.items())
+     )
+     message_parts = [
+         f"Missing critical columns: {missing_details}.",
+     ]
+     if expected_version:
+         message_parts.append(f"Expected schema version: {expected_version}.")
+     if current_version:
+         message_parts.append(f"Current schema version: {current_version}.")
+     elif current_version_note:
+         message_parts.append(f"Current schema version: unknown ({current_version_note}).")
+     message_parts.append(f"Remediation: {remediation}.")
+     raise RuntimeError(" ".join(message_parts))
+ 
+ 
+ def _latest_migration_version() -> str | None:
+     migrations = discover_migrations()
+     if not migrations:
+         return None
+     return max(migrations, key=lambda m: int(m.version)).version
+ 
+ 
+ def _read_schema_version(db: Database) -> tuple[str | None, str | None]:
+     try:
          rows = db.execute_returning(
-             "SELECT column_name FROM information_schema.columns "
-             "WHERE table_schema = current_schema() AND table_name = 'tx_intents'"
+             "SELECT version FROM schema_migrations ORDER BY version DESC LIMIT 1"
          )
-         existing = {r["column_name"].lower() for r in rows}
- 
-     missing = required - existing
-     if missing:
-         raise RuntimeError(
-             f"FATAL: tx_intents missing columns {missing}. "
-             f"Migration 012 may have failed. Check DB manually."
+     except Exception as exc:
+         if _is_missing_schema_migrations(exc):
+             return None, "schema_migrations missing"
+         logger.warning(
+             "migrations.schema_read_failed",
+             db=_db_context(db),
+             error=str(exc)[:200],
          )
+         return None, "schema_migrations unreadable"
+ 
+     if not rows:
+         return None, "schema_migrations empty"
+     return str(rows[0]["version"]), None
+ 
+ 
+ def _schema_remediation(
+     expected_version: str | None,
+     current_version: str | None,
+     current_version_note: str | None,
+ ) -> str:
+     if current_version_note is not None:
+         return "DB appears out-of-band; delete/re-init or run a repair script if available"
+     if expected_version and current_version:
+         expected_int = _parse_version_int(expected_version)
+         current_int = _parse_version_int(current_version)
+         if expected_int is not None and current_int is not None:
+             if current_int < expected_int:
+                 return "run `brawny db migrate`"
+     return "DB appears out-of-band; delete/re-init or run a repair script if available"
+ 
+ 
+ def _parse_version_int(version: str) -> int | None:
+     try:
+         return int(version)
+     except ValueError:
+         return None
 
 
  class Migrator:
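The reworked verify_critical_schema above checks several tables through PRAGMA table_info instead of only tx_intents. A minimal sketch of the same column-presence check against a plain sqlite3 connection (the required-column sets are copied from the diff; the wrapper types and error reporting are simplified):

    import sqlite3

    REQUIRED = {
        "tx_intents": {"intent_id", "status", "claim_token", "claimed_at", "claimed_by", "lease_expires_at"},
    }

    def missing_columns(conn: sqlite3.Connection) -> dict[str, set[str]]:
        missing: dict[str, set[str]] = {}
        for table, required in REQUIRED.items():
            rows = conn.execute(f"PRAGMA table_info({table})").fetchall()
            existing = {row[1].lower() for row in rows}  # index 1 of table_info is the column name
            gap = {col for col in required if col.lower() not in existing}
            if gap:
                missing[table] = gap
        return missing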
brawny/db/migrations/001_init.sql CHANGED
@@ -25,7 +25,7 @@ CREATE TABLE IF NOT EXISTS block_state (
  -- 2. Block Hash History - for reorg detection
  -- ============================================================================
  CREATE TABLE IF NOT EXISTS block_hash_history (
-     id SERIAL PRIMARY KEY,
+     id INTEGER PRIMARY KEY AUTOINCREMENT,
      chain_id INTEGER NOT NULL,
      block_number BIGINT NOT NULL,
      block_hash VARCHAR(66) NOT NULL,
@@ -81,7 +81,7 @@ CREATE TABLE IF NOT EXISTS signers (
  -- 6. Nonce Reservations - tracks nonce allocation and status
  -- ============================================================================
  CREATE TABLE IF NOT EXISTS nonce_reservations (
-     id SERIAL PRIMARY KEY,
+     id INTEGER PRIMARY KEY AUTOINCREMENT,
      chain_id INTEGER NOT NULL,
      signer_address VARCHAR(42) NOT NULL,
      nonce BIGINT NOT NULL,
@@ -118,6 +118,7 @@ CREATE TABLE IF NOT EXISTS tx_intents (
      claim_token VARCHAR(100),
      claimed_at TIMESTAMP,
      claimed_by VARCHAR(200),
+     lease_expires_at TIMESTAMP,
      created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
      updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
  );
@@ -141,7 +142,7 @@ CREATE TABLE IF NOT EXISTS tx_attempts (
      tx_hash VARCHAR(66),
      gas_params_json TEXT NOT NULL,
      status VARCHAR(20) NOT NULL DEFAULT 'signed' CHECK (
-         status IN ('signed', 'broadcast', 'pending', 'confirmed', 'failed', 'replaced')
+         status IN ('signed', 'pending_send', 'broadcast', 'pending', 'confirmed', 'failed', 'replaced')
      ),
      error_code VARCHAR(100),
      error_detail TEXT,
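The widened CHECK constraint above adds 'pending_send' as a valid attempt status. A quick way to confirm SQLite enforces such a constraint, using a cut-down table rather than the full tx_attempts schema:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE tx_attempts_demo ("
        "  status VARCHAR(20) NOT NULL DEFAULT 'signed' CHECK ("
        "    status IN ('signed', 'pending_send', 'broadcast', 'pending', 'confirmed', 'failed', 'replaced')"
        "  )"
        ")"
    )
    conn.execute("INSERT INTO tx_attempts_demo (status) VALUES ('pending_send')")  # accepted
    try:
        conn.execute("INSERT INTO tx_attempts_demo (status) VALUES ('bogus')")
    except sqlite3.IntegrityError as exc:
        print(exc)  # CHECK constraint failed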
brawny/db/migrations/010_add_nonce_gap_index.sql CHANGED
@@ -1,7 +1,7 @@
  -- Add composite index for efficient nonce gap age queries
  -- Makes the MIN(created_at) scan an index walk instead of heap scan
  -- Version: 010
- -- Note: CONCURRENTLY removed for SQLite compatibility (PostgreSQL-only syntax)
+ -- Note: CONCURRENTLY removed for SQLite compatibility
 
  CREATE INDEX IF NOT EXISTS idx_nonce_res_chain_signer_status_nonce_created
      ON nonce_reservations (chain_id, signer_address, status, nonce, created_at);
brawny/db/migrations/011_add_job_logs.sql CHANGED
@@ -1,9 +1,8 @@
  -- Job logs for operator-visible snapshots during check()
  -- Version: 011
- -- Note: SERIAL becomes INTEGER on SQLite (handled by sqlite.py adapter)
 
  CREATE TABLE IF NOT EXISTS job_logs (
-     id SERIAL PRIMARY KEY,
+     id INTEGER PRIMARY KEY AUTOINCREMENT,
      chain_id INTEGER NOT NULL,
      job_id VARCHAR(200) NOT NULL,
      block_number BIGINT,
brawny/db/migrations/012_add_claimed_by.sql CHANGED
@@ -1,5 +1,5 @@
- -- Postgres only (SQLite handled in migrator)
- ALTER TABLE tx_intents ADD COLUMN IF NOT EXISTS claimed_by VARCHAR(200);
+ -- SQLite handled in migrator (idempotent)
+ ALTER TABLE tx_intents ADD COLUMN claimed_by VARCHAR(200);
 
  INSERT INTO schema_migrations (version) VALUES ('012')
  ON CONFLICT (version) DO NOTHING;
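The ON CONFLICT (version) DO NOTHING clause keeps the version stamp idempotent if the migration file is replayed. A small sketch of that behavior in plain SQLite (this assumes version is the primary key of schema_migrations, as the conflict target implies; SQLite 3.24+ is needed for upsert syntax):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE schema_migrations (version TEXT PRIMARY KEY, applied_at TIMESTAMP)")
    for _ in range(2):  # the second run hits the conflict and is a no-op
        conn.execute(
            "INSERT INTO schema_migrations (version) VALUES ('012') "
            "ON CONFLICT (version) DO NOTHING"
        )
    print(conn.execute("SELECT COUNT(*) FROM schema_migrations").fetchone()[0])  # 1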
brawny/db/migrations/013_attempt_unique.sql ADDED
@@ -0,0 +1,10 @@
+ -- Add binding identifier for attempts and enforce uniqueness per binding
+ 
+ ALTER TABLE tx_intents ADD COLUMN broadcast_binding_id UUID;
+ ALTER TABLE tx_attempts ADD COLUMN endpoint_binding_id UUID;
+ 
+ CREATE UNIQUE INDEX IF NOT EXISTS uq_tx_attempts_intent_nonce_binding_hash
+     ON tx_attempts(intent_id, nonce, endpoint_binding_id, tx_hash)
+     WHERE tx_hash IS NOT NULL AND endpoint_binding_id IS NOT NULL;
+ 
+ INSERT INTO schema_migrations (version) VALUES ('013');
brawny/db/migrations/014_add_lease_expires_at.sql ADDED
@@ -0,0 +1,5 @@
+ -- Add lease_expires_at for intent claim leases
+ ALTER TABLE tx_intents ADD COLUMN lease_expires_at TIMESTAMP;
+ 
+ INSERT INTO schema_migrations (version) VALUES ('014')
+ ON CONFLICT (version) DO NOTHING;
brawny/db/migrations/015_add_signer_alias.sql ADDED
@@ -0,0 +1,14 @@
+ -- Add signer_alias columns for intent/transaction auditing
+ -- Note: SQLite does not support "IF NOT EXISTS" for ALTER TABLE ADD COLUMN
+ 
+ ALTER TABLE tx_intents ADD COLUMN signer_alias VARCHAR(200);
+ 
+ -- Normalize existing address data to lowercase canonical form.
+ UPDATE signers SET signer_address = lower(signer_address);
+ UPDATE nonce_reservations SET signer_address = lower(signer_address);
+ UPDATE tx_intents SET signer_address = lower(signer_address), to_address = lower(to_address);
+ UPDATE abi_cache SET address = lower(address);
+ UPDATE proxy_cache SET proxy_address = lower(proxy_address),
+     implementation_address = lower(implementation_address);
+ 
+ INSERT INTO schema_migrations (version) VALUES ('015');
brawny/db/migrations/016_runtime_controls_and_quarantine.sql ADDED
@@ -0,0 +1,32 @@
+ -- Description: Add signer quarantine, runtime controls, and mutation audit
+ 
+ -- Signer containment
+ ALTER TABLE signers ADD COLUMN quarantined_at TIMESTAMP;
+ ALTER TABLE signers ADD COLUMN quarantine_reason TEXT;
+ ALTER TABLE signers ADD COLUMN replacements_paused INTEGER NOT NULL DEFAULT 0;
+ 
+ -- Runtime controls (containment with TTL)
+ CREATE TABLE IF NOT EXISTS runtime_controls (
+     control VARCHAR(100) PRIMARY KEY,
+     active INTEGER NOT NULL DEFAULT 0,
+     expires_at TIMESTAMP,
+     reason TEXT,
+     actor TEXT,
+     mode VARCHAR(20) NOT NULL DEFAULT 'auto',
+     updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
+ );
+ 
+ -- Durable mutation audit (minimal, append-only)
+ CREATE TABLE IF NOT EXISTS mutation_audit (
+     id INTEGER PRIMARY KEY AUTOINCREMENT,
+     entity_type VARCHAR(50) NOT NULL,
+     entity_id TEXT NOT NULL,
+     action VARCHAR(100) NOT NULL,
+     actor TEXT,
+     reason TEXT,
+     source TEXT,
+     metadata_json TEXT,
+     created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
+ );
+ 
+ INSERT INTO schema_migrations (version) VALUES ('016');
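The runtime_controls table pairs an active flag with an optional expires_at TTL. This diff does not show how brawny reads it; a hypothetical reader that treats an expired row as inactive might look like:

    import sqlite3

    def control_is_active(conn: sqlite3.Connection, control: str) -> bool:
        # Hypothetical check: a control counts as active only while unexpired.
        row = conn.execute(
            "SELECT control FROM runtime_controls "
            "WHERE control = ? AND active = 1 "
            "AND (expires_at IS NULL OR expires_at > CURRENT_TIMESTAMP)",
            (control,),
        ).fetchone()
        return row is not None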
brawny/db/migrations/017_add_job_drain.sql ADDED
@@ -0,0 +1,6 @@
+ -- Description: Add job drain controls
+ 
+ ALTER TABLE jobs ADD COLUMN drain_until TIMESTAMP;
+ ALTER TABLE jobs ADD COLUMN drain_reason TEXT;
+ 
+ INSERT INTO schema_migrations (version) VALUES ('017');
brawny/db/migrations/018_add_nonce_reset_audit.sql ADDED
@@ -0,0 +1,20 @@
+ -- Audit table for nonce force resets
+ -- Provides durable record of destructive nonce operations for incident investigation
+ 
+ CREATE TABLE IF NOT EXISTS nonce_reset_audit (
+     id INTEGER PRIMARY KEY AUTOINCREMENT,
+     chain_id INTEGER NOT NULL,
+     signer_address TEXT NOT NULL,
+     old_next_nonce INTEGER,
+     new_next_nonce INTEGER NOT NULL,
+     released_reservations INTEGER NOT NULL DEFAULT 0,
+     source TEXT NOT NULL, -- 'cli', 'executor', 'api'
+     reason TEXT,
+     created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+ );
+ 
+ CREATE INDEX IF NOT EXISTS idx_nonce_reset_audit_signer
+     ON nonce_reset_audit(chain_id, signer_address);
+ 
+ CREATE INDEX IF NOT EXISTS idx_nonce_reset_audit_created
+     ON nonce_reset_audit(created_at);
brawny/db/migrations/019_add_job_cooldowns.sql ADDED
@@ -0,0 +1,8 @@
+ -- Add job cooldown tracking
+ CREATE TABLE IF NOT EXISTS job_cooldowns (
+     cooldown_key TEXT PRIMARY KEY,
+     last_intent_at INTEGER NOT NULL
+ );
+ 
+ INSERT INTO schema_migrations (version) VALUES ('019')
+ ON CONFLICT (version) DO NOTHING;
brawny/db/migrations/020_attempt_unique_initial.sql ADDED
@@ -0,0 +1,7 @@
+ -- Enforce single initial attempt per intent+nonce (replacements allowed)
+ 
+ CREATE UNIQUE INDEX IF NOT EXISTS uq_tx_attempts_intent_nonce_initial
+     ON tx_attempts(intent_id, nonce)
+     WHERE replaces_attempt_id IS NULL;
+ 
+ INSERT INTO schema_migrations (version) VALUES ('020');
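Migrations 013 and 020 both rely on partial unique indexes (a WHERE clause on the index), so uniqueness is enforced only for the matching rows. A minimal demonstration of migration 020's rule with a simplified stand-in table (not the full tx_attempts schema):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE attempts (intent_id TEXT, nonce INTEGER, replaces_attempt_id TEXT)")
    conn.execute(
        "CREATE UNIQUE INDEX uq_initial ON attempts(intent_id, nonce) "
        "WHERE replaces_attempt_id IS NULL"
    )
    conn.execute("INSERT INTO attempts VALUES ('i1', 5, NULL)")         # initial attempt
    conn.execute("INSERT INTO attempts VALUES ('i1', 5, 'attempt-1')")  # replacement: allowed
    try:
        conn.execute("INSERT INTO attempts VALUES ('i1', 5, NULL)")     # second initial: rejected
    except sqlite3.IntegrityError as exc:
        print(exc)  # UNIQUE constraint failed: attempts.intent_id, attempts.nonce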
brawny/db/ops/__init__.py CHANGED
@@ -1,29 +1,7 @@
- """Database operations modules.
- 
- Flat function-based operations for each domain:
- - blocks: Block state and hash history
- - jobs: Job configuration and KV store
- - intents: Transaction intents
- - attempts: Transaction attempts
- - nonces: Signer state and nonce reservations
- - cache: ABI and proxy cache
- 
- Usage:
-     from brawny.db import ops
- 
-     # Use functions from specific modules
-     state = ops.blocks.get_block_state(db, chain_id)
-     job = ops.jobs.get_job(db, job_id)
-     intent = ops.intents.create_intent(db, ...)
- """
+ """Database operations modules."""
 
  from __future__ import annotations
 
- from brawny.db.ops import blocks
- from brawny.db.ops import jobs
- from brawny.db.ops import intents
- from brawny.db.ops import attempts
- from brawny.db.ops import nonces
- from brawny.db.ops import cache
+ from brawny.db.ops import logs
 
- __all__ = ["blocks", "jobs", "intents", "attempts", "nonces", "cache"]
+ __all__ = ["logs"]
brawny/db/ops/logs.py CHANGED
@@ -71,8 +71,7 @@ def list_all_logs(
 
  def list_latest_logs(db: "Database", chain_id: int) -> list[dict[str, Any]]:
      """Get the most recent log entry per job."""
-     query = Q.LIST_LATEST_JOB_LOGS[db.dialect]
-     rows = db.execute_returning(query, {"chain_id": chain_id})
+     rows = db.execute_returning(Q.LIST_LATEST_JOB_LOGS, {"chain_id": chain_id})
      return [_row_to_log(row) for row in rows]
 
 