brawny 0.1.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (141) hide show
  1. brawny/__init__.py +106 -0
  2. brawny/_context.py +232 -0
  3. brawny/_rpc/__init__.py +38 -0
  4. brawny/_rpc/broadcast.py +172 -0
  5. brawny/_rpc/clients.py +98 -0
  6. brawny/_rpc/context.py +49 -0
  7. brawny/_rpc/errors.py +252 -0
  8. brawny/_rpc/gas.py +158 -0
  9. brawny/_rpc/manager.py +982 -0
  10. brawny/_rpc/selector.py +156 -0
  11. brawny/accounts.py +534 -0
  12. brawny/alerts/__init__.py +132 -0
  13. brawny/alerts/abi_resolver.py +530 -0
  14. brawny/alerts/base.py +152 -0
  15. brawny/alerts/context.py +271 -0
  16. brawny/alerts/contracts.py +635 -0
  17. brawny/alerts/encoded_call.py +201 -0
  18. brawny/alerts/errors.py +267 -0
  19. brawny/alerts/events.py +680 -0
  20. brawny/alerts/function_caller.py +364 -0
  21. brawny/alerts/health.py +185 -0
  22. brawny/alerts/routing.py +118 -0
  23. brawny/alerts/send.py +364 -0
  24. brawny/api.py +660 -0
  25. brawny/chain.py +93 -0
  26. brawny/cli/__init__.py +16 -0
  27. brawny/cli/app.py +17 -0
  28. brawny/cli/bootstrap.py +37 -0
  29. brawny/cli/commands/__init__.py +41 -0
  30. brawny/cli/commands/abi.py +93 -0
  31. brawny/cli/commands/accounts.py +632 -0
  32. brawny/cli/commands/console.py +495 -0
  33. brawny/cli/commands/contract.py +139 -0
  34. brawny/cli/commands/health.py +112 -0
  35. brawny/cli/commands/init_project.py +86 -0
  36. brawny/cli/commands/intents.py +130 -0
  37. brawny/cli/commands/job_dev.py +254 -0
  38. brawny/cli/commands/jobs.py +308 -0
  39. brawny/cli/commands/logs.py +87 -0
  40. brawny/cli/commands/maintenance.py +182 -0
  41. brawny/cli/commands/migrate.py +51 -0
  42. brawny/cli/commands/networks.py +253 -0
  43. brawny/cli/commands/run.py +249 -0
  44. brawny/cli/commands/script.py +209 -0
  45. brawny/cli/commands/signer.py +248 -0
  46. brawny/cli/helpers.py +265 -0
  47. brawny/cli_templates.py +1445 -0
  48. brawny/config/__init__.py +74 -0
  49. brawny/config/models.py +404 -0
  50. brawny/config/parser.py +633 -0
  51. brawny/config/routing.py +55 -0
  52. brawny/config/validation.py +246 -0
  53. brawny/daemon/__init__.py +14 -0
  54. brawny/daemon/context.py +69 -0
  55. brawny/daemon/core.py +702 -0
  56. brawny/daemon/loops.py +327 -0
  57. brawny/db/__init__.py +78 -0
  58. brawny/db/base.py +986 -0
  59. brawny/db/base_new.py +165 -0
  60. brawny/db/circuit_breaker.py +97 -0
  61. brawny/db/global_cache.py +298 -0
  62. brawny/db/mappers.py +182 -0
  63. brawny/db/migrate.py +349 -0
  64. brawny/db/migrations/001_init.sql +186 -0
  65. brawny/db/migrations/002_add_included_block.sql +7 -0
  66. brawny/db/migrations/003_add_broadcast_at.sql +10 -0
  67. brawny/db/migrations/004_broadcast_binding.sql +20 -0
  68. brawny/db/migrations/005_add_retry_after.sql +9 -0
  69. brawny/db/migrations/006_add_retry_count_column.sql +11 -0
  70. brawny/db/migrations/007_add_gap_tracking.sql +18 -0
  71. brawny/db/migrations/008_add_transactions.sql +72 -0
  72. brawny/db/migrations/009_add_intent_metadata.sql +5 -0
  73. brawny/db/migrations/010_add_nonce_gap_index.sql +9 -0
  74. brawny/db/migrations/011_add_job_logs.sql +24 -0
  75. brawny/db/migrations/012_add_claimed_by.sql +5 -0
  76. brawny/db/ops/__init__.py +29 -0
  77. brawny/db/ops/attempts.py +108 -0
  78. brawny/db/ops/blocks.py +83 -0
  79. brawny/db/ops/cache.py +93 -0
  80. brawny/db/ops/intents.py +296 -0
  81. brawny/db/ops/jobs.py +110 -0
  82. brawny/db/ops/logs.py +97 -0
  83. brawny/db/ops/nonces.py +322 -0
  84. brawny/db/postgres.py +2535 -0
  85. brawny/db/postgres_new.py +196 -0
  86. brawny/db/queries.py +584 -0
  87. brawny/db/sqlite.py +2733 -0
  88. brawny/db/sqlite_new.py +191 -0
  89. brawny/history.py +126 -0
  90. brawny/interfaces.py +136 -0
  91. brawny/invariants.py +155 -0
  92. brawny/jobs/__init__.py +26 -0
  93. brawny/jobs/base.py +287 -0
  94. brawny/jobs/discovery.py +233 -0
  95. brawny/jobs/job_validation.py +111 -0
  96. brawny/jobs/kv.py +125 -0
  97. brawny/jobs/registry.py +283 -0
  98. brawny/keystore.py +484 -0
  99. brawny/lifecycle.py +551 -0
  100. brawny/logging.py +290 -0
  101. brawny/metrics.py +594 -0
  102. brawny/model/__init__.py +53 -0
  103. brawny/model/contexts.py +319 -0
  104. brawny/model/enums.py +70 -0
  105. brawny/model/errors.py +194 -0
  106. brawny/model/events.py +93 -0
  107. brawny/model/startup.py +20 -0
  108. brawny/model/types.py +483 -0
  109. brawny/networks/__init__.py +96 -0
  110. brawny/networks/config.py +269 -0
  111. brawny/networks/manager.py +423 -0
  112. brawny/obs/__init__.py +67 -0
  113. brawny/obs/emit.py +158 -0
  114. brawny/obs/health.py +175 -0
  115. brawny/obs/heartbeat.py +133 -0
  116. brawny/reconciliation.py +108 -0
  117. brawny/scheduler/__init__.py +19 -0
  118. brawny/scheduler/poller.py +472 -0
  119. brawny/scheduler/reorg.py +632 -0
  120. brawny/scheduler/runner.py +708 -0
  121. brawny/scheduler/shutdown.py +371 -0
  122. brawny/script_tx.py +297 -0
  123. brawny/scripting.py +251 -0
  124. brawny/startup.py +76 -0
  125. brawny/telegram.py +393 -0
  126. brawny/testing.py +108 -0
  127. brawny/tx/__init__.py +41 -0
  128. brawny/tx/executor.py +1071 -0
  129. brawny/tx/fees.py +50 -0
  130. brawny/tx/intent.py +423 -0
  131. brawny/tx/monitor.py +628 -0
  132. brawny/tx/nonce.py +498 -0
  133. brawny/tx/replacement.py +456 -0
  134. brawny/tx/utils.py +26 -0
  135. brawny/utils.py +205 -0
  136. brawny/validation.py +69 -0
  137. brawny-0.1.13.dist-info/METADATA +156 -0
  138. brawny-0.1.13.dist-info/RECORD +141 -0
  139. brawny-0.1.13.dist-info/WHEEL +5 -0
  140. brawny-0.1.13.dist-info/entry_points.txt +2 -0
  141. brawny-0.1.13.dist-info/top_level.txt +1 -0
@@ -0,0 +1,186 @@
1
-- brawny initial schema migration
-- Version: 001
-- Description: Create all core tables for the brawny framework

-- ============================================================================
-- Migration tracking table
-- ============================================================================
CREATE TABLE IF NOT EXISTS schema_migrations (
    version VARCHAR(20) PRIMARY KEY,
    applied_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);

-- ============================================================================
-- 1. Block State - tracks last processed block per chain
-- ============================================================================
CREATE TABLE IF NOT EXISTS block_state (
    chain_id INTEGER PRIMARY KEY,
    last_processed_block_number BIGINT NOT NULL,
    -- VARCHAR(66): 0x-prefixed 32-byte hash rendered in hex
    last_processed_block_hash VARCHAR(66) NOT NULL,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);

-- ============================================================================
-- 2. Block Hash History - for reorg detection
-- ============================================================================
CREATE TABLE IF NOT EXISTS block_hash_history (
    id SERIAL PRIMARY KEY,
    chain_id INTEGER NOT NULL,
    block_number BIGINT NOT NULL,
    block_hash VARCHAR(66) NOT NULL,
    inserted_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    UNIQUE (chain_id, block_number)
);

CREATE INDEX IF NOT EXISTS idx_block_hash_history_chain_block
    ON block_hash_history(chain_id, block_number DESC);

-- ============================================================================
-- 3. Jobs - job registry and configuration
-- ============================================================================
CREATE TABLE IF NOT EXISTS jobs (
    job_id VARCHAR(200) PRIMARY KEY,
    job_name VARCHAR(200) NOT NULL,
    enabled BOOLEAN NOT NULL DEFAULT true,
    check_interval_blocks INTEGER NOT NULL DEFAULT 1,
    last_checked_block_number BIGINT,
    last_triggered_block_number BIGINT,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);

-- Partial index: enabled jobs are the hot query path
CREATE INDEX IF NOT EXISTS idx_jobs_enabled ON jobs(enabled) WHERE enabled = true;

-- ============================================================================
-- 4. Job KV Store - persistent key-value storage per job
-- ============================================================================
CREATE TABLE IF NOT EXISTS job_kv (
    job_id VARCHAR(200) NOT NULL REFERENCES jobs(job_id) ON DELETE CASCADE,
    key VARCHAR(200) NOT NULL,
    value_json TEXT NOT NULL,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    PRIMARY KEY (job_id, key)
);

-- ============================================================================
-- 5. Signers - tracks nonce state per signer per chain
-- ============================================================================
CREATE TABLE IF NOT EXISTS signers (
    chain_id INTEGER NOT NULL,
    -- VARCHAR(42): 0x-prefixed 20-byte address rendered in hex
    signer_address VARCHAR(42) NOT NULL,
    next_nonce BIGINT NOT NULL DEFAULT 0,
    last_synced_chain_nonce BIGINT,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    PRIMARY KEY (chain_id, signer_address)
);

-- ============================================================================
-- 6. Nonce Reservations - tracks nonce allocation and status
-- ============================================================================
CREATE TABLE IF NOT EXISTS nonce_reservations (
    id SERIAL PRIMARY KEY,
    chain_id INTEGER NOT NULL,
    signer_address VARCHAR(42) NOT NULL,
    nonce BIGINT NOT NULL,
    status VARCHAR(20) NOT NULL CHECK (status IN ('reserved', 'in_flight', 'released', 'orphaned')),
    intent_id UUID,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    -- A nonce may only be reserved once per signer per chain
    UNIQUE (chain_id, signer_address, nonce)
);

CREATE INDEX IF NOT EXISTS idx_nonce_reservations_signer_status
    ON nonce_reservations(chain_id, signer_address, status);

-- ============================================================================
-- 7. Transaction Intents - durable transaction intent records
-- ============================================================================
CREATE TABLE IF NOT EXISTS tx_intents (
    intent_id UUID PRIMARY KEY,
    job_id VARCHAR(200) NOT NULL,
    chain_id INTEGER NOT NULL,
    signer_address VARCHAR(42) NOT NULL,
    idempotency_key VARCHAR(200) NOT NULL,
    to_address VARCHAR(42) NOT NULL,
    data TEXT,
    -- VARCHAR(78): wide enough for a uint256 rendered in decimal
    value_wei VARCHAR(78) NOT NULL DEFAULT '0',
    gas_limit BIGINT,
    max_fee_per_gas VARCHAR(78),
    max_priority_fee_per_gas VARCHAR(78),
    min_confirmations INTEGER NOT NULL DEFAULT 1,
    deadline_ts TIMESTAMP,
    status VARCHAR(20) NOT NULL DEFAULT 'created' CHECK (
        status IN ('created', 'claimed', 'sending', 'pending', 'confirmed', 'failed', 'abandoned')
    ),
    claim_token VARCHAR(100),
    claimed_at TIMESTAMP,
    claimed_by VARCHAR(200),
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);

CREATE INDEX IF NOT EXISTS idx_tx_intents_status ON tx_intents(status);
CREATE INDEX IF NOT EXISTS idx_tx_intents_job_status ON tx_intents(job_id, status);
CREATE INDEX IF NOT EXISTS idx_tx_intents_signer_status ON tx_intents(chain_id, signer_address, status);
CREATE INDEX IF NOT EXISTS idx_tx_intents_created ON tx_intents(created_at);

-- Idempotency is scoped to (chain_id, signer_address)
CREATE UNIQUE INDEX IF NOT EXISTS uq_tx_intents_idempotency_scoped
    ON tx_intents(chain_id, signer_address, idempotency_key);

-- ============================================================================
-- 8. Transaction Attempts - individual broadcast attempts
-- ============================================================================
CREATE TABLE IF NOT EXISTS tx_attempts (
    attempt_id UUID PRIMARY KEY,
    intent_id UUID NOT NULL REFERENCES tx_intents(intent_id),
    nonce BIGINT NOT NULL,
    tx_hash VARCHAR(66),
    gas_params_json TEXT NOT NULL,
    status VARCHAR(20) NOT NULL DEFAULT 'signed' CHECK (
        status IN ('signed', 'broadcast', 'pending', 'confirmed', 'failed', 'replaced')
    ),
    error_code VARCHAR(100),
    error_detail TEXT,
    -- Self-reference: set when this attempt replaces a prior one (fee bump)
    replaces_attempt_id UUID REFERENCES tx_attempts(attempt_id),
    broadcast_block BIGINT,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);

CREATE INDEX IF NOT EXISTS idx_tx_attempts_intent ON tx_attempts(intent_id);
CREATE INDEX IF NOT EXISTS idx_tx_attempts_tx_hash ON tx_attempts(tx_hash) WHERE tx_hash IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_tx_attempts_status ON tx_attempts(status);

-- ============================================================================
-- 9. ABI Cache - cached contract ABIs
-- ============================================================================
CREATE TABLE IF NOT EXISTS abi_cache (
    chain_id INTEGER NOT NULL,
    address VARCHAR(42) NOT NULL,
    abi_json TEXT NOT NULL,
    source VARCHAR(30) NOT NULL CHECK (source IN ('etherscan', 'sourcify', 'manual', 'proxy_implementation')),
    resolved_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    PRIMARY KEY (chain_id, address)
);

CREATE INDEX IF NOT EXISTS idx_abi_cache_resolved ON abi_cache(resolved_at);

-- ============================================================================
-- 10. Proxy Cache - cached proxy-to-implementation mappings
-- ============================================================================
CREATE TABLE IF NOT EXISTS proxy_cache (
    chain_id INTEGER NOT NULL,
    proxy_address VARCHAR(42) NOT NULL,
    implementation_address VARCHAR(42) NOT NULL,
    resolved_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    PRIMARY KEY (chain_id, proxy_address)
);

-- ============================================================================
-- Record this migration
-- ============================================================================
INSERT INTO schema_migrations (version) VALUES ('001');
@@ -0,0 +1,7 @@
1
-- Add included_block to tx_attempts for receipt inclusion tracking
-- Version: 002

-- Block number in which the attempt's transaction was included
-- (NULL until a receipt is observed).
ALTER TABLE tx_attempts
    ADD COLUMN included_block BIGINT;

INSERT INTO schema_migrations (version) VALUES ('002');
@@ -0,0 +1,10 @@
1
-- Add broadcast_at to tx_attempts for accurate confirmation timing
-- Version: 003

ALTER TABLE tx_attempts
    ADD COLUMN broadcast_at TIMESTAMP;

-- Backfill existing rows: updated_at is the closest available
-- approximation of when the attempt was broadcast.
UPDATE tx_attempts
SET broadcast_at = updated_at
WHERE broadcast_at IS NULL;

INSERT INTO schema_migrations (version) VALUES ('003');
@@ -0,0 +1,20 @@
1
-- Migration: Broadcast binding for RPC groups privacy invariant
-- Version: 004
--
-- This adds columns to track which RPC group and endpoints were used for
-- the first broadcast of an intent. Retries MUST use the same endpoint list
-- to preserve the privacy invariant (no cross-group fallback).

-- Broadcast binding on intents (set on first successful broadcast)
ALTER TABLE tx_intents ADD COLUMN broadcast_group VARCHAR(100);
ALTER TABLE tx_intents ADD COLUMN broadcast_endpoints_json TEXT;

-- Index for querying by broadcast group
CREATE INDEX IF NOT EXISTS idx_tx_intents_broadcast_group
    ON tx_intents(broadcast_group);

-- Audit trail on attempts (which endpoint was actually used)
ALTER TABLE tx_attempts ADD COLUMN broadcast_group VARCHAR(100);
ALTER TABLE tx_attempts ADD COLUMN endpoint_url VARCHAR(500);

-- Record migration
INSERT INTO schema_migrations (version) VALUES ('004');
@@ -0,0 +1,9 @@
1
-- Add retry_after to tx_intents for execution backoff
-- Version: 005

-- Intents with retry_after in the future are skipped by the executor
-- until the timestamp passes (NULL = eligible immediately).
ALTER TABLE tx_intents ADD COLUMN retry_after TIMESTAMP;

CREATE INDEX IF NOT EXISTS idx_tx_intents_retry_after
    ON tx_intents(retry_after);

INSERT INTO schema_migrations (version) VALUES ('005');
@@ -0,0 +1,11 @@
1
-- Add retry_count to tx_intents for tracking execution attempts
-- Version: 006
-- Description: Move retry tracking from job_kv to native column for atomicity

ALTER TABLE tx_intents ADD COLUMN retry_count INTEGER NOT NULL DEFAULT 0;

-- Index for querying intents by retry count (e.g., finding frequently retried intents);
-- partial so the common retry_count = 0 rows stay out of the index.
CREATE INDEX IF NOT EXISTS idx_tx_intents_retry_count
    ON tx_intents(retry_count) WHERE retry_count > 0;

INSERT INTO schema_migrations (version) VALUES ('006');
@@ -0,0 +1,18 @@
1
-- Migration 007: Add gap tracking columns to signers table
--
-- Adds:
--   - gap_started_at: Timestamp when nonce gap blocking started (for alerts)
--   - alias: Optional human-readable alias for signers
--
-- Part of nonce policy simplification - see NONCE.md

-- Add gap tracking column (nullable timestamp)
ALTER TABLE signers ADD COLUMN gap_started_at TIMESTAMP;

-- Add optional alias column
ALTER TABLE signers ADD COLUMN alias VARCHAR(50);

-- Index for alias lookup (partial index, only non-null aliases)
CREATE INDEX IF NOT EXISTS idx_signers_alias ON signers (chain_id, alias) WHERE alias IS NOT NULL;

INSERT INTO schema_migrations (version) VALUES ('007');
@@ -0,0 +1,72 @@
1
-- brawny transactions table migration
-- Version: 008
-- Description: Add single Transaction model replacing TxIntent + TxAttempt
--
-- This is Phase 1 of the ATTEMPT_MODEL.md simplification:
--   - Single transactions table (no joins)
--   - 4-state TxStatus (created, broadcast, confirmed, failed)
--   - JSON blobs for rarely-queried fields
--   - Append-only tx_hash_history for debugging

-- ============================================================================
-- Transactions - single model for job transaction lifecycle
-- ============================================================================
CREATE TABLE IF NOT EXISTS transactions (
    -- Identity (queryable, indexed)
    tx_id UUID PRIMARY KEY,
    job_id VARCHAR(200) NOT NULL,
    chain_id INTEGER NOT NULL,
    idempotency_key VARCHAR(200) NOT NULL,

    -- Payload (immutable after creation)
    signer_address VARCHAR(42) NOT NULL,
    to_address VARCHAR(42) NOT NULL,
    data TEXT,
    -- VARCHAR(78): wide enough for a uint256 rendered in decimal
    value_wei VARCHAR(78) NOT NULL DEFAULT '0',
    min_confirmations INTEGER NOT NULL DEFAULT 1,
    deadline_ts TIMESTAMP,

    -- Status (queryable)
    status VARCHAR(20) NOT NULL DEFAULT 'created'
        CHECK (status IN ('created', 'broadcast', 'confirmed', 'failed')),
    failure_type VARCHAR(50),

    -- Broadcast state (queryable)
    current_tx_hash VARCHAR(66),
    current_nonce BIGINT,
    replacement_count INTEGER NOT NULL DEFAULT 0,

    -- Worker coordination (queryable)
    claim_token VARCHAR(100),
    claimed_at TIMESTAMP,

    -- Confirmation (queryable)
    included_block BIGINT,
    confirmed_at TIMESTAMP,

    -- Audit (queryable)
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,

    -- JSON BLOBS (rarely queried - no indexes)
    gas_params_json TEXT,      -- {"gas_limit": N, "max_fee": N, "priority_fee": N}
    broadcast_info_json TEXT,  -- {"group": str, "endpoints": [...]}
    error_info_json TEXT,      -- ErrorInfo as JSON
    tx_hash_history TEXT       -- Append-only JSON array of TxHashRecord
);

-- Indexes (only on queryable columns)
CREATE INDEX IF NOT EXISTS idx_transactions_status ON transactions(status);
CREATE INDEX IF NOT EXISTS idx_transactions_job_status ON transactions(job_id, status);
CREATE INDEX IF NOT EXISTS idx_transactions_signer ON transactions(chain_id, signer_address);
CREATE INDEX IF NOT EXISTS idx_transactions_tx_hash ON transactions(current_tx_hash) WHERE current_tx_hash IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_transactions_created ON transactions(created_at);

-- Idempotency is scoped to (chain_id, signer_address)
CREATE UNIQUE INDEX IF NOT EXISTS uq_transactions_idempotency_scoped
    ON transactions(chain_id, signer_address, idempotency_key);

-- ============================================================================
-- Record this migration
-- ============================================================================
INSERT INTO schema_migrations (version) VALUES ('008');
@@ -0,0 +1,5 @@
1
-- Add metadata_json column to tx_intents table
-- Version: 009
-- Stores per-intent context for alerts (JSON-serializable dict)
-- trigger.reason is auto-merged into this field

ALTER TABLE tx_intents ADD COLUMN metadata_json TEXT;

-- Record this migration. Every other migration in this series (001-008,
-- 010-012) records its version in schema_migrations; this one omitted it,
-- which would make 009 appear unapplied to any tooling that inspects the
-- schema_migrations table.
INSERT INTO schema_migrations (version) VALUES ('009');
@@ -0,0 +1,9 @@
1
-- Add composite index for efficient nonce gap age queries
-- Makes the MIN(created_at) scan an index walk instead of heap scan
-- Version: 010
-- Note: CONCURRENTLY removed for SQLite compatibility (PostgreSQL-only syntax)

CREATE INDEX IF NOT EXISTS idx_nonce_res_chain_signer_status_nonce_created
    ON nonce_reservations (chain_id, signer_address, status, nonce, created_at);

INSERT INTO schema_migrations (version) VALUES ('010');
@@ -0,0 +1,24 @@
1
-- Job logs for operator-visible snapshots during check()
-- Version: 011
-- Note: SERIAL becomes INTEGER on SQLite (handled by sqlite.py adapter)

CREATE TABLE IF NOT EXISTS job_logs (
    id SERIAL PRIMARY KEY,
    chain_id INTEGER NOT NULL,
    job_id VARCHAR(200) NOT NULL,
    block_number BIGINT,
    ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    level VARCHAR(20) NOT NULL DEFAULT 'info',
    -- Structured log payload, serialized as JSON
    fields_json TEXT NOT NULL
);

-- Primary query: recent logs for a job
CREATE INDEX IF NOT EXISTS idx_job_logs_job_ts
    ON job_logs(chain_id, job_id, ts DESC);

-- Cleanup query: purge old logs (per-chain)
CREATE INDEX IF NOT EXISTS idx_job_logs_chain_ts
    ON job_logs(chain_id, ts);

-- Record migration
INSERT INTO schema_migrations (version) VALUES ('011');
@@ -0,0 +1,5 @@
1
-- Add claimed_by to tx_intents (worker identity that claimed the intent).
-- Version: 012
-- Postgres only (SQLite handled in migrator): both ADD COLUMN IF NOT EXISTS
-- and ON CONFLICT ... DO NOTHING are PostgreSQL syntax.
ALTER TABLE tx_intents ADD COLUMN IF NOT EXISTS claimed_by VARCHAR(200);

INSERT INTO schema_migrations (version) VALUES ('012')
ON CONFLICT (version) DO NOTHING;
@@ -0,0 +1,29 @@
1
+ """Database operations modules.
2
+
3
+ Flat function-based operations for each domain:
4
+ - blocks: Block state and hash history
5
+ - jobs: Job configuration and KV store
6
+ - intents: Transaction intents
7
+ - attempts: Transaction attempts
8
+ - nonces: Signer state and nonce reservations
9
+ - cache: ABI and proxy cache
10
+
11
+ Usage:
12
+ from brawny.db import ops
13
+
14
+ # Use functions from specific modules
15
+ state = ops.blocks.get_block_state(db, chain_id)
16
+ job = ops.jobs.get_job(db, job_id)
17
+ intent = ops.intents.create_intent(db, ...)
18
+ """
19
+
20
+ from __future__ import annotations
21
+
22
+ from brawny.db.ops import blocks
23
+ from brawny.db.ops import jobs
24
+ from brawny.db.ops import intents
25
+ from brawny.db.ops import attempts
26
+ from brawny.db.ops import nonces
27
+ from brawny.db.ops import cache
28
+
29
+ __all__ = ["blocks", "jobs", "intents", "attempts", "nonces", "cache"]
@@ -0,0 +1,108 @@
1
+ """Transaction attempt operations."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from datetime import datetime
6
+ from uuid import UUID
7
+
8
+ from brawny.db.base_new import Database
9
+ from brawny.db import queries as Q
10
+ from brawny.db import mappers as M
11
+ from brawny.model.types import TxAttempt, GasParams
12
+
13
+
14
def create_attempt(
    db: Database,
    attempt_id: UUID,
    intent_id: UUID,
    nonce: int,
    tx_hash: str | None,
    gas_params: GasParams,
    status: str,
    broadcast_block: int | None = None,
    broadcast_at: datetime | None = None,
    broadcast_group: str | None = None,
    endpoint_url: str | None = None,
) -> TxAttempt | None:
    """Insert a new transaction attempt and return the mapped record.

    UUIDs are stringified for the driver; gas params are stored as JSON.
    Returns None when the insert query yields no row back.
    """
    params = {
        "attempt_id": str(attempt_id),
        "intent_id": str(intent_id),
        "nonce": nonce,
        "tx_hash": tx_hash,
        "gas_params_json": gas_params.to_json(),
        "status": status,
        "broadcast_block": broadcast_block,
        "broadcast_at": broadcast_at,
        "broadcast_group": broadcast_group,
        "endpoint_url": endpoint_url,
    }
    inserted = db.fetch_one(Q.CREATE_ATTEMPT, params)
    if not inserted:
        return None
    return M.row_to_attempt(inserted)
41
+
42
+
43
def get_attempt(db: Database, attempt_id: UUID) -> TxAttempt | None:
    """Fetch a single attempt by its UUID primary key, or None if missing."""
    record = db.fetch_one(Q.GET_ATTEMPT, {"attempt_id": str(attempt_id)})
    if not record:
        return None
    return M.row_to_attempt(record)
47
+
48
+
49
def get_attempt_by_tx_hash(db: Database, tx_hash: str) -> TxAttempt | None:
    """Look up the attempt that carries the given transaction hash."""
    record = db.fetch_one(Q.GET_ATTEMPT_BY_TX_HASH, {"tx_hash": tx_hash})
    if not record:
        return None
    return M.row_to_attempt(record)
53
+
54
+
55
def get_attempts_for_intent(db: Database, intent_id: UUID) -> list[TxAttempt]:
    """Return every attempt for *intent_id*, ordered by created_at DESC."""
    records = db.fetch_all(Q.GET_ATTEMPTS_FOR_INTENT, {"intent_id": str(intent_id)})
    return list(map(M.row_to_attempt, records))
59
+
60
+
61
def get_latest_attempt_for_intent(db: Database, intent_id: UUID) -> TxAttempt | None:
    """Return the most recent attempt for an intent, or None when it has none."""
    record = db.fetch_one(Q.GET_LATEST_ATTEMPT_FOR_INTENT, {"intent_id": str(intent_id)})
    if not record:
        return None
    return M.row_to_attempt(record)
65
+
66
+
67
def update_attempt_status(db: Database, attempt_id: UUID, status: str) -> bool:
    """Set a new status on an attempt.

    Returns True when a row was actually updated.
    """
    affected = db.execute_rowcount(
        Q.UPDATE_ATTEMPT_STATUS,
        {"attempt_id": str(attempt_id), "status": status},
    )
    return affected > 0
74
+
75
+
76
def update_attempt_included(
    db: Database, attempt_id: UUID, status: str, included_block: int
) -> bool:
    """Record the block in which the attempt was included, plus a new status.

    Returns True when a row was actually updated.
    """
    affected = db.execute_rowcount(
        Q.UPDATE_ATTEMPT_INCLUDED,
        {
            "attempt_id": str(attempt_id),
            "status": status,
            "included_block": included_block,
        },
    )
    return affected > 0
86
+
87
+
88
def update_attempt_error(
    db: Database,
    attempt_id: UUID,
    status: str,
    error_code: str | None,
    error_detail: str | None,
) -> bool:
    """Attach error information (code + detail) and a new status to an attempt.

    Returns True when a row was actually updated.
    """
    affected = db.execute_rowcount(
        Q.UPDATE_ATTEMPT_ERROR,
        {
            "attempt_id": str(attempt_id),
            "status": status,
            "error_code": error_code,
            "error_detail": error_detail,
        },
    )
    return affected > 0
103
+
104
+
105
def get_pending_attempts(db: Database, chain_id: int) -> list[TxAttempt]:
    """Return the attempts the GET_PENDING_ATTEMPTS query selects for a chain."""
    records = db.fetch_all(Q.GET_PENDING_ATTEMPTS, {"chain_id": chain_id})
    return list(map(M.row_to_attempt, records))
@@ -0,0 +1,83 @@
1
+ """Block state and hash history operations."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from brawny.db.base_new import Database, BlockState
6
+ from brawny.db import queries as Q
7
+ from brawny.db import mappers as M
8
+
9
+
10
def get_block_state(db: Database, chain_id: int) -> BlockState | None:
    """Return the stored block-processing state for a chain, or None."""
    record = db.fetch_one(Q.GET_BLOCK_STATE, {"chain_id": chain_id})
    if not record:
        return None
    return M.row_to_block_state(record)
14
+
15
+
16
def upsert_block_state(
    db: Database, chain_id: int, block_number: int, block_hash: str
) -> None:
    """Write the latest processed block (insert-or-update) for *chain_id*."""
    params = {
        "chain_id": chain_id,
        "block_number": block_number,
        "block_hash": block_hash,
    }
    db.execute(Q.UPSERT_BLOCK_STATE, params)
25
+
26
+
27
def get_block_hash_at_height(
    db: Database, chain_id: int, block_number: int
) -> str | None:
    """Return the recorded hash for a block height, or None when absent."""
    record = db.fetch_one(
        Q.GET_BLOCK_HASH_AT_HEIGHT,
        {"chain_id": chain_id, "block_number": block_number},
    )
    if not record:
        return None
    return record["block_hash"]
36
+
37
+
38
def insert_block_hash(
    db: Database, chain_id: int, block_number: int, block_hash: str
) -> None:
    """Append one (height, hash) entry to the block hash history."""
    params = {
        "chain_id": chain_id,
        "block_number": block_number,
        "block_hash": block_hash,
    }
    db.execute(Q.INSERT_BLOCK_HASH, params)
47
+
48
+
49
def delete_block_hashes_above(db: Database, chain_id: int, block_number: int) -> int:
    """Drop hash-history entries above *block_number* (reorg rewind).

    Returns the number of rows removed.
    """
    params = {"chain_id": chain_id, "block_number": block_number}
    return db.execute_rowcount(Q.DELETE_BLOCK_HASHES_ABOVE, params)
55
+
56
+
57
def delete_block_hash_at_height(db: Database, chain_id: int, block_number: int) -> bool:
    """Remove a single stale hash-history entry.

    Returns True when a row was actually deleted.
    """
    removed = db.execute_rowcount(
        Q.DELETE_BLOCK_HASH_AT_HEIGHT,
        {"chain_id": chain_id, "block_number": block_number},
    )
    return removed > 0
64
+
65
+
66
def cleanup_old_block_hashes(db: Database, chain_id: int, keep_count: int) -> int:
    """Trim hash history below (max_block - keep_count).

    Returns the number of rows deleted; 0 when the history is empty.
    """
    # Find the newest block recorded for this chain; nothing to trim without it.
    newest = db.fetch_one(Q.GET_MAX_BLOCK_IN_HISTORY, {"chain_id": chain_id})
    if not newest or newest["max_block"] is None:
        return 0

    threshold = newest["max_block"] - keep_count
    return db.execute_rowcount(
        Q.DELETE_BLOCK_HASHES_BELOW,
        {"chain_id": chain_id, "cutoff": threshold},
    )
78
+
79
+
80
def get_oldest_block_in_history(db: Database, chain_id: int) -> int | None:
    """Return the lowest block number still kept in hash history, or None."""
    record = db.fetch_one(Q.GET_OLDEST_BLOCK_IN_HISTORY, {"chain_id": chain_id})
    if not record:
        return None
    return record["min_block"]