brawny 0.1.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- brawny/__init__.py +106 -0
- brawny/_context.py +232 -0
- brawny/_rpc/__init__.py +38 -0
- brawny/_rpc/broadcast.py +172 -0
- brawny/_rpc/clients.py +98 -0
- brawny/_rpc/context.py +49 -0
- brawny/_rpc/errors.py +252 -0
- brawny/_rpc/gas.py +158 -0
- brawny/_rpc/manager.py +982 -0
- brawny/_rpc/selector.py +156 -0
- brawny/accounts.py +534 -0
- brawny/alerts/__init__.py +132 -0
- brawny/alerts/abi_resolver.py +530 -0
- brawny/alerts/base.py +152 -0
- brawny/alerts/context.py +271 -0
- brawny/alerts/contracts.py +635 -0
- brawny/alerts/encoded_call.py +201 -0
- brawny/alerts/errors.py +267 -0
- brawny/alerts/events.py +680 -0
- brawny/alerts/function_caller.py +364 -0
- brawny/alerts/health.py +185 -0
- brawny/alerts/routing.py +118 -0
- brawny/alerts/send.py +364 -0
- brawny/api.py +660 -0
- brawny/chain.py +93 -0
- brawny/cli/__init__.py +16 -0
- brawny/cli/app.py +17 -0
- brawny/cli/bootstrap.py +37 -0
- brawny/cli/commands/__init__.py +41 -0
- brawny/cli/commands/abi.py +93 -0
- brawny/cli/commands/accounts.py +632 -0
- brawny/cli/commands/console.py +495 -0
- brawny/cli/commands/contract.py +139 -0
- brawny/cli/commands/health.py +112 -0
- brawny/cli/commands/init_project.py +86 -0
- brawny/cli/commands/intents.py +130 -0
- brawny/cli/commands/job_dev.py +254 -0
- brawny/cli/commands/jobs.py +308 -0
- brawny/cli/commands/logs.py +87 -0
- brawny/cli/commands/maintenance.py +182 -0
- brawny/cli/commands/migrate.py +51 -0
- brawny/cli/commands/networks.py +253 -0
- brawny/cli/commands/run.py +249 -0
- brawny/cli/commands/script.py +209 -0
- brawny/cli/commands/signer.py +248 -0
- brawny/cli/helpers.py +265 -0
- brawny/cli_templates.py +1445 -0
- brawny/config/__init__.py +74 -0
- brawny/config/models.py +404 -0
- brawny/config/parser.py +633 -0
- brawny/config/routing.py +55 -0
- brawny/config/validation.py +246 -0
- brawny/daemon/__init__.py +14 -0
- brawny/daemon/context.py +69 -0
- brawny/daemon/core.py +702 -0
- brawny/daemon/loops.py +327 -0
- brawny/db/__init__.py +78 -0
- brawny/db/base.py +986 -0
- brawny/db/base_new.py +165 -0
- brawny/db/circuit_breaker.py +97 -0
- brawny/db/global_cache.py +298 -0
- brawny/db/mappers.py +182 -0
- brawny/db/migrate.py +349 -0
- brawny/db/migrations/001_init.sql +186 -0
- brawny/db/migrations/002_add_included_block.sql +7 -0
- brawny/db/migrations/003_add_broadcast_at.sql +10 -0
- brawny/db/migrations/004_broadcast_binding.sql +20 -0
- brawny/db/migrations/005_add_retry_after.sql +9 -0
- brawny/db/migrations/006_add_retry_count_column.sql +11 -0
- brawny/db/migrations/007_add_gap_tracking.sql +18 -0
- brawny/db/migrations/008_add_transactions.sql +72 -0
- brawny/db/migrations/009_add_intent_metadata.sql +5 -0
- brawny/db/migrations/010_add_nonce_gap_index.sql +9 -0
- brawny/db/migrations/011_add_job_logs.sql +24 -0
- brawny/db/migrations/012_add_claimed_by.sql +5 -0
- brawny/db/ops/__init__.py +29 -0
- brawny/db/ops/attempts.py +108 -0
- brawny/db/ops/blocks.py +83 -0
- brawny/db/ops/cache.py +93 -0
- brawny/db/ops/intents.py +296 -0
- brawny/db/ops/jobs.py +110 -0
- brawny/db/ops/logs.py +97 -0
- brawny/db/ops/nonces.py +322 -0
- brawny/db/postgres.py +2535 -0
- brawny/db/postgres_new.py +196 -0
- brawny/db/queries.py +584 -0
- brawny/db/sqlite.py +2733 -0
- brawny/db/sqlite_new.py +191 -0
- brawny/history.py +126 -0
- brawny/interfaces.py +136 -0
- brawny/invariants.py +155 -0
- brawny/jobs/__init__.py +26 -0
- brawny/jobs/base.py +287 -0
- brawny/jobs/discovery.py +233 -0
- brawny/jobs/job_validation.py +111 -0
- brawny/jobs/kv.py +125 -0
- brawny/jobs/registry.py +283 -0
- brawny/keystore.py +484 -0
- brawny/lifecycle.py +551 -0
- brawny/logging.py +290 -0
- brawny/metrics.py +594 -0
- brawny/model/__init__.py +53 -0
- brawny/model/contexts.py +319 -0
- brawny/model/enums.py +70 -0
- brawny/model/errors.py +194 -0
- brawny/model/events.py +93 -0
- brawny/model/startup.py +20 -0
- brawny/model/types.py +483 -0
- brawny/networks/__init__.py +96 -0
- brawny/networks/config.py +269 -0
- brawny/networks/manager.py +423 -0
- brawny/obs/__init__.py +67 -0
- brawny/obs/emit.py +158 -0
- brawny/obs/health.py +175 -0
- brawny/obs/heartbeat.py +133 -0
- brawny/reconciliation.py +108 -0
- brawny/scheduler/__init__.py +19 -0
- brawny/scheduler/poller.py +472 -0
- brawny/scheduler/reorg.py +632 -0
- brawny/scheduler/runner.py +708 -0
- brawny/scheduler/shutdown.py +371 -0
- brawny/script_tx.py +297 -0
- brawny/scripting.py +251 -0
- brawny/startup.py +76 -0
- brawny/telegram.py +393 -0
- brawny/testing.py +108 -0
- brawny/tx/__init__.py +41 -0
- brawny/tx/executor.py +1071 -0
- brawny/tx/fees.py +50 -0
- brawny/tx/intent.py +423 -0
- brawny/tx/monitor.py +628 -0
- brawny/tx/nonce.py +498 -0
- brawny/tx/replacement.py +456 -0
- brawny/tx/utils.py +26 -0
- brawny/utils.py +205 -0
- brawny/validation.py +69 -0
- brawny-0.1.13.dist-info/METADATA +156 -0
- brawny-0.1.13.dist-info/RECORD +141 -0
- brawny-0.1.13.dist-info/WHEEL +5 -0
- brawny-0.1.13.dist-info/entry_points.txt +2 -0
- brawny-0.1.13.dist-info/top_level.txt +1 -0
brawny/daemon/loops.py
ADDED
|
@@ -0,0 +1,327 @@
|
|
|
1
|
+
"""Worker and monitor loops for brawny daemon.
|
|
2
|
+
|
|
3
|
+
Provides the main loop functions for intent execution and transaction monitoring.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
import time
|
|
9
|
+
from datetime import datetime, timedelta
|
|
10
|
+
from threading import Event
|
|
11
|
+
from typing import TYPE_CHECKING
|
|
12
|
+
|
|
13
|
+
from brawny.metrics import (
|
|
14
|
+
ACTIVE_WORKERS,
|
|
15
|
+
INTENT_CLAIMED,
|
|
16
|
+
INTENT_RELEASED,
|
|
17
|
+
INTENT_SENDING_STUCK,
|
|
18
|
+
INTENTS_BACKING_OFF,
|
|
19
|
+
get_metrics,
|
|
20
|
+
)
|
|
21
|
+
from brawny.model.enums import AttemptStatus, IntentStatus
|
|
22
|
+
from brawny.tx.intent import transition_intent
|
|
23
|
+
|
|
24
|
+
if TYPE_CHECKING:
|
|
25
|
+
from threading import Thread
|
|
26
|
+
from brawny.daemon.context import DaemonContext, DaemonState
|
|
27
|
+
|
|
28
|
+
from brawny.alerts.health import cleanup_stale_fingerprints, health_alert
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def run_worker(
    worker_id: int,
    stop_event: Event,
    wakeup_hint: Event,
    ctx: "DaemonContext",
    state: "DaemonState",
    dry_run: bool = False,
) -> None:
    """Worker thread for executing intents.

    Loops until ``stop_event`` is set: releases stale claims, claims the next
    intent from the database, and hands it to the executor. On executor
    exceptions, the claim is released only when no broadcast attempt was
    recorded; otherwise the monitor/replacer is left to handle the intent.

    Args:
        worker_id: Worker identifier for logging
        stop_event: Event signaling shutdown
        wakeup_hint: Event for immediate wakeup on new intents
        ctx: Daemon context with shared components
        state: Daemon state with callbacks
        dry_run: If True, claim and release without executing
    """
    # Executor may legitimately be absent only in dry-run mode.
    assert ctx.executor is not None or dry_run, "run_worker requires executor unless dry_run"

    ctx.log.debug("worker.started", worker_id=worker_id)

    while not stop_event.is_set():
        # Reclaim intents whose worker died mid-claim; every worker does this
        # each pass, so recovery does not depend on any single thread.
        released = ctx.db.release_stale_intent_claims(
            max_age_seconds=ctx.config.claim_timeout_seconds
        )
        if released > 0:
            ctx.log.info(
                "worker.stale_claims_released",
                worker_id=worker_id,
                released=released,
            )
            metrics = get_metrics()
            metrics.counter(INTENT_RELEASED).inc(
                released,
                chain_id=ctx.chain_id,
                reason="stale_claim",
            )

        claim_token = state.make_claim_token(worker_id)
        claimed_by = state.make_claimed_by(worker_id)
        intent = ctx.db.claim_next_intent(claim_token, claimed_by=claimed_by)

        if intent is None:
            # Nothing to do: sleep up to 1s, but wake early if the poller
            # signals a newly created intent via wakeup_hint.
            wakeup_hint.wait(timeout=1.0)
            wakeup_hint.clear()
            continue

        ctx.log.info(
            "intent.claimed",
            intent_id=str(intent.intent_id),
            job_id=intent.job_id,
            claim_token=claim_token,
            claimed_by=claimed_by,
            worker_id=worker_id,
        )
        metrics = get_metrics()
        metrics.counter(INTENT_CLAIMED).inc(
            chain_id=ctx.chain_id,
        )

        if dry_run:
            # Exercise the claim path only; release immediately without executing.
            ctx.log.info("worker.dry_run", intent_id=str(intent.intent_id))
            released = ctx.db.release_intent_claim(intent.intent_id)
            if not released:
                ctx.log.warning(
                    "worker.dry_run_release_failed",
                    intent_id=str(intent.intent_id),
                )
            else:
                metrics = get_metrics()
                metrics.counter(INTENT_RELEASED).inc(
                    chain_id=ctx.chain_id,
                    reason="dry_run",
                )
            continue

        # Track in-flight work so shutdown can wait for executing intents.
        state.inflight_inc()
        try:
            outcome = ctx.executor.execute(intent)
            ctx.log.info(
                "worker.executed",
                intent_id=str(intent.intent_id),
                job_id=intent.job_id,
                result=outcome.result.value,
            )
        except Exception as e:
            ctx.log.error(
                "worker.execute_exception",
                intent_id=str(intent.intent_id),
                job_id=intent.job_id,
                error=str(e)[:200],
                exc_info=True,
            )
            health_alert(
                component="brawny.tx.executor",
                chain_id=ctx.chain_id,
                error=e,
                job_id=intent.job_id,
                intent_id=str(intent.intent_id),
                claim_token=intent.claim_token,
                status=intent.status.value if hasattr(intent.status, "value") else str(intent.status),
                action="Check logs; intent will retry or timeout",
                db_dialect=ctx.db.dialect,
                send_fn=ctx.health_send_fn,
                health_chat_id=ctx.health_chat_id,
                cooldown_seconds=ctx.health_cooldown,
            )

            # Decide whether it is safe to release the claim: only if NO
            # broadcast attempt exists. A failed attempts lookup leaves
            # attempts as None, which is treated as "unknown — don't release".
            try:
                attempts = ctx.db.get_attempts_for_intent(intent.intent_id)
            except Exception as query_err:
                ctx.log.warning(
                    "worker.exception_attempts_lookup_failed",
                    intent_id=str(intent.intent_id),
                    job_id=intent.job_id,
                    error=str(query_err)[:200],
                )
                attempts = None

            # Deliberate `== []` (not falsiness): an empty list means the
            # lookup succeeded and found no attempts; None means it failed.
            if attempts == []:
                if not intent.claim_token:
                    ctx.log.warning(
                        "worker.claim_token_missing",
                        intent_id=str(intent.intent_id),
                        job_id=intent.job_id,
                    )
                else:
                    try:
                        # Token-guarded release: only frees the claim if this
                        # worker's token still owns it.
                        released = ctx.db.release_intent_claim_if_token(
                            intent.intent_id,
                            intent.claim_token,
                        )
                        if released:
                            ctx.log.info(
                                "worker.claim_released_on_error",
                                intent_id=str(intent.intent_id),
                            )
                            metrics = get_metrics()
                            metrics.counter(INTENT_RELEASED).inc(
                                chain_id=ctx.chain_id,
                                reason="pre_attempt_exception",
                            )
                    except Exception:
                        ctx.log.exception(
                            "worker.claim_release_failed",
                            intent_id=str(intent.intent_id),
                        )
            else:
                # An attempt may already be on-chain; keep the claim so the
                # monitor/replacer resolves it rather than double-sending.
                ctx.log.warning(
                    "worker.exception_with_attempts",
                    intent_id=str(intent.intent_id),
                    attempt_count=(len(attempts) if attempts is not None else None),
                    hint="Not releasing claim; monitor/replacer should handle",
                )
        finally:
            state.inflight_dec()

    ctx.log.debug("worker.stopped", worker_id=worker_id)
|
|
190
|
+
|
|
191
|
+
|
|
192
|
+
def run_monitor(
    stop_event: Event,
    ctx: "DaemonContext",
    worker_threads: list["Thread"],
) -> None:
    """Background loop for monitoring pending transactions.

    Each pass checks pending transactions and stuck replacements, then runs
    interval-gated maintenance: nonce reconciliation, RPC health probes (30s),
    worker/backoff gauges (10s), stuck-SENDING recovery (30s), and log plus
    health-fingerprint cleanup (hourly). Any exception in a pass is logged and
    alerted, then the loop continues.

    Args:
        stop_event: Event signaling shutdown
        ctx: Daemon context with shared components
        worker_threads: List of worker threads for gauge reporting
    """
    assert ctx.monitor is not None, "run_monitor requires monitor"
    assert ctx.replacer is not None, "run_monitor requires replacer"
    assert ctx.nonce_manager is not None, "run_monitor requires nonce_manager"

    ctx.log.debug("monitor.started")
    # Initialized to now so the first reconcile waits a full interval; the
    # other timers start at 0.0 so their tasks run on the first pass.
    last_reconcile = time.time()
    last_rpc_health = 0.0
    last_worker_gauge = 0.0
    last_sending_recover = 0.0
    last_log_cleanup = 0.0

    while not stop_event.is_set():
        try:
            ctx.monitor.monitor_all_pending()
            ctx.replacer.process_stuck_transactions()

            now = time.time()
            if now - last_reconcile >= ctx.config.nonce_reconcile_interval_seconds:
                ctx.nonce_manager.reconcile()
                last_reconcile = now

            if now - last_rpc_health >= 30:
                ctx.rpc.get_health()
                last_rpc_health = now

            if now - last_worker_gauge >= 10:
                metrics = get_metrics()
                active = sum(1 for t in worker_threads if t.is_alive())
                metrics.gauge(ACTIVE_WORKERS).set(
                    active,
                    chain_id=ctx.chain_id,
                )
                backing_off = ctx.db.get_backing_off_intent_count(chain_id=ctx.chain_id)
                metrics.gauge(INTENTS_BACKING_OFF).set(
                    backing_off,
                    chain_id=ctx.chain_id,
                )
                last_worker_gauge = now

            if now - last_sending_recover >= 30:
                _recover_stuck_sending(ctx)
                last_sending_recover = now

            # Job log cleanup (hourly)
            if now - last_log_cleanup >= 3600:
                try:
                    # Local import keeps the ops module off the daemon's
                    # startup path.
                    from brawny.db.ops import logs as log_ops
                    # NOTE(review): datetime.utcnow() is naive UTC and
                    # deprecated since Python 3.12; presumably the DB stores
                    # naive UTC timestamps — confirm before switching to
                    # datetime.now(timezone.utc).
                    cutoff = datetime.utcnow() - timedelta(days=ctx.config.log_retention_days)
                    deleted = log_ops.delete_old_logs(ctx.db, ctx.chain_id, cutoff)
                    if deleted > 0:
                        ctx.log.info("job_logs.cleanup", deleted=deleted)
                except Exception as cleanup_err:
                    ctx.log.warning("job_logs.cleanup_failed", error=str(cleanup_err))

                # Health alert fingerprint cleanup (also hourly)
                try:
                    removed = cleanup_stale_fingerprints(ctx.health_cooldown)
                    if removed > 0:
                        ctx.log.debug("health_fingerprints.cleanup", removed=removed)
                except Exception as cleanup_err:
                    ctx.log.warning("health_fingerprints.cleanup_failed", error=str(cleanup_err))

                # Updated even if either cleanup failed, so failures are
                # retried hourly rather than on every pass.
                last_log_cleanup = now
        except Exception as e:
            ctx.log.error("monitor.error", error=str(e)[:200])
            health_alert(
                component="brawny.tx.monitor",
                chain_id=ctx.chain_id,
                error=e,
                action="Check DB/RPC connectivity",
                db_dialect=ctx.db.dialect,
                send_fn=ctx.health_send_fn,
                health_chat_id=ctx.health_chat_id,
                cooldown_seconds=ctx.health_cooldown,
            )

        # Sleep between passes; stop_event interrupts the wait for shutdown.
        stop_event.wait(timeout=ctx.config.poll_interval_seconds * 2)

    ctx.log.debug("monitor.stopped")
|
|
283
|
+
|
|
284
|
+
|
|
285
|
+
def _recover_stuck_sending(ctx: "DaemonContext") -> None:
    """Recover intents stuck in SENDING state.

    Intents that have sat in SENDING longer than the claim timeout are
    resolved one of two ways: if their latest attempt has a tx hash, the
    broadcast went out, so the intent moves to PENDING for the monitor to
    track; otherwise the dangling attempt (if any) is marked failed, its
    nonce is released, and the intent returns to CREATED for re-execution.

    Args:
        ctx: Daemon context with shared components
    """
    assert ctx.nonce_manager is not None, "_recover_stuck_sending requires nonce_manager"

    candidates = ctx.db.list_sending_intents_older_than(
        max_age_seconds=ctx.config.claim_timeout_seconds,
        chain_id=ctx.chain_id,
    )
    for stuck in candidates:
        latest = ctx.db.get_latest_attempt_for_intent(stuck.intent_id)

        if latest and latest.tx_hash:
            # Broadcast made it out: hand the intent to the monitor as a
            # normal pending transaction.
            transition_intent(
                ctx.db,
                stuck.intent_id,
                IntentStatus.PENDING,
                "sending_recover",
                chain_id=ctx.chain_id,
            )
            continue

        # Never broadcast: fail the dangling attempt, free its nonce, and
        # restart the intent's lifecycle from CREATED.
        if latest:
            ctx.db.update_attempt_status(
                latest.attempt_id,
                AttemptStatus.FAILED.value,
                error_code="sending_stuck",
                error_detail="Intent stuck in sending without broadcast",
            )
            ctx.nonce_manager.release(stuck.signer_address, latest.nonce)
        transition_intent(
            ctx.db,
            stuck.intent_id,
            IntentStatus.CREATED,
            "sending_stuck",
            chain_id=ctx.chain_id,
        )
        get_metrics().counter(INTENT_SENDING_STUCK).inc(
            chain_id=ctx.chain_id,
            age_bucket=">claim_timeout",
        )
|
brawny/db/__init__.py
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
"""Database layer with support for PostgreSQL (production) and SQLite (development)."""
|
|
2
|
+
|
|
3
|
+
from brawny.db.base import (
|
|
4
|
+
ABICacheEntry,
|
|
5
|
+
BlockHashEntry,
|
|
6
|
+
BlockState,
|
|
7
|
+
Database,
|
|
8
|
+
IsolationLevel,
|
|
9
|
+
ProxyCacheEntry,
|
|
10
|
+
)
|
|
11
|
+
from brawny.db.migrate import Migrator, discover_migrations, get_pending_migrations
|
|
12
|
+
try:
|
|
13
|
+
from brawny.db.postgres import PostgresDatabase
|
|
14
|
+
except ModuleNotFoundError:
|
|
15
|
+
PostgresDatabase = None # type: ignore
|
|
16
|
+
from brawny.db.sqlite import SQLiteDatabase
|
|
17
|
+
|
|
18
|
+
__all__ = [
|
|
19
|
+
# Base classes
|
|
20
|
+
"Database",
|
|
21
|
+
"IsolationLevel",
|
|
22
|
+
# Data classes
|
|
23
|
+
"BlockState",
|
|
24
|
+
"BlockHashEntry",
|
|
25
|
+
"ABICacheEntry",
|
|
26
|
+
"ProxyCacheEntry",
|
|
27
|
+
# Implementations
|
|
28
|
+
"SQLiteDatabase",
|
|
29
|
+
"PostgresDatabase",
|
|
30
|
+
# Migration
|
|
31
|
+
"Migrator",
|
|
32
|
+
"discover_migrations",
|
|
33
|
+
"get_pending_migrations",
|
|
34
|
+
# Factory
|
|
35
|
+
"create_database",
|
|
36
|
+
]
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def create_database(database_url: str, **kwargs: object) -> Database:
    """Factory function to create a database instance based on URL.

    Args:
        database_url: Database connection URL
            - sqlite:///path/to/db.sqlite
            - postgresql://user:pass@host:port/dbname
        **kwargs: Additional arguments passed to the database constructor

    Returns:
        Database instance (SQLiteDatabase or PostgresDatabase)

    Raises:
        ValueError: If database URL scheme is not supported
    """
    # Circuit-breaker settings are popped so they are never forwarded twice.
    cb_failures = int(kwargs.pop("circuit_breaker_failures", 5))
    cb_seconds = int(kwargs.pop("circuit_breaker_seconds", 30))

    if database_url.startswith("sqlite:///"):
        # NOTE(review): remaining kwargs are silently dropped on this path
        # (only Postgres forwards them) — confirm this is intentional.
        return SQLiteDatabase(
            database_url,
            circuit_breaker_failures=cb_failures,
            circuit_breaker_seconds=cb_seconds,
        )

    if database_url.startswith(("postgresql://", "postgres://")):
        # PostgresDatabase is None when psycopg is not installed (guarded
        # import at module top).
        if PostgresDatabase is None:
            raise ValueError(
                "Postgres support requires psycopg and psycopg-pool. "
                "Install with: pip install psycopg[binary] psycopg-pool"
            )
        return PostgresDatabase(  # type: ignore
            database_url,
            circuit_breaker_failures=cb_failures,
            circuit_breaker_seconds=cb_seconds,
            **kwargs,
        )

    raise ValueError(
        f"Unsupported database URL: {database_url}. "
        "Must start with 'sqlite:///', 'postgresql://', or 'postgres://'"
    )
|