svc-infra 0.1.595__py3-none-any.whl → 0.1.706__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of svc-infra has been flagged by the registry as possibly problematic.
- svc_infra/__init__.py +58 -2
- svc_infra/apf_payments/models.py +133 -42
- svc_infra/apf_payments/provider/aiydan.py +121 -47
- svc_infra/apf_payments/provider/base.py +30 -9
- svc_infra/apf_payments/provider/stripe.py +156 -62
- svc_infra/apf_payments/schemas.py +18 -9
- svc_infra/apf_payments/service.py +98 -41
- svc_infra/apf_payments/settings.py +5 -1
- svc_infra/api/__init__.py +61 -0
- svc_infra/api/fastapi/__init__.py +15 -0
- svc_infra/api/fastapi/admin/__init__.py +3 -0
- svc_infra/api/fastapi/admin/add.py +245 -0
- svc_infra/api/fastapi/apf_payments/router.py +128 -70
- svc_infra/api/fastapi/apf_payments/setup.py +13 -6
- svc_infra/api/fastapi/auth/__init__.py +65 -0
- svc_infra/api/fastapi/auth/_cookies.py +6 -2
- svc_infra/api/fastapi/auth/add.py +17 -14
- svc_infra/api/fastapi/auth/gaurd.py +45 -16
- svc_infra/api/fastapi/auth/mfa/models.py +3 -1
- svc_infra/api/fastapi/auth/mfa/pre_auth.py +10 -6
- svc_infra/api/fastapi/auth/mfa/router.py +15 -8
- svc_infra/api/fastapi/auth/mfa/security.py +1 -2
- svc_infra/api/fastapi/auth/mfa/utils.py +2 -1
- svc_infra/api/fastapi/auth/mfa/verify.py +9 -2
- svc_infra/api/fastapi/auth/policy.py +0 -1
- svc_infra/api/fastapi/auth/providers.py +3 -1
- svc_infra/api/fastapi/auth/routers/apikey_router.py +6 -6
- svc_infra/api/fastapi/auth/routers/oauth_router.py +146 -52
- svc_infra/api/fastapi/auth/routers/session_router.py +6 -2
- svc_infra/api/fastapi/auth/security.py +31 -10
- svc_infra/api/fastapi/auth/sender.py +8 -1
- svc_infra/api/fastapi/auth/state.py +3 -1
- svc_infra/api/fastapi/auth/ws_security.py +275 -0
- svc_infra/api/fastapi/billing/router.py +73 -0
- svc_infra/api/fastapi/billing/setup.py +19 -0
- svc_infra/api/fastapi/cache/add.py +9 -5
- svc_infra/api/fastapi/db/__init__.py +5 -1
- svc_infra/api/fastapi/db/http.py +3 -1
- svc_infra/api/fastapi/db/nosql/__init__.py +39 -1
- svc_infra/api/fastapi/db/nosql/mongo/add.py +47 -32
- svc_infra/api/fastapi/db/nosql/mongo/crud_router.py +30 -11
- svc_infra/api/fastapi/db/sql/__init__.py +5 -1
- svc_infra/api/fastapi/db/sql/add.py +71 -26
- svc_infra/api/fastapi/db/sql/crud_router.py +210 -22
- svc_infra/api/fastapi/db/sql/health.py +3 -1
- svc_infra/api/fastapi/db/sql/session.py +18 -0
- svc_infra/api/fastapi/db/sql/users.py +18 -6
- svc_infra/api/fastapi/dependencies/ratelimit.py +78 -14
- svc_infra/api/fastapi/docs/add.py +173 -0
- svc_infra/api/fastapi/docs/landing.py +4 -2
- svc_infra/api/fastapi/docs/scoped.py +62 -15
- svc_infra/api/fastapi/dual/__init__.py +12 -2
- svc_infra/api/fastapi/dual/dualize.py +1 -1
- svc_infra/api/fastapi/dual/protected.py +126 -4
- svc_infra/api/fastapi/dual/public.py +25 -0
- svc_infra/api/fastapi/dual/router.py +40 -13
- svc_infra/api/fastapi/dx.py +33 -2
- svc_infra/api/fastapi/ease.py +10 -2
- svc_infra/api/fastapi/http/concurrency.py +2 -1
- svc_infra/api/fastapi/http/conditional.py +3 -1
- svc_infra/api/fastapi/middleware/debug.py +4 -1
- svc_infra/api/fastapi/middleware/errors/catchall.py +6 -2
- svc_infra/api/fastapi/middleware/errors/exceptions.py +1 -1
- svc_infra/api/fastapi/middleware/errors/handlers.py +54 -8
- svc_infra/api/fastapi/middleware/graceful_shutdown.py +104 -0
- svc_infra/api/fastapi/middleware/idempotency.py +197 -70
- svc_infra/api/fastapi/middleware/idempotency_store.py +187 -0
- svc_infra/api/fastapi/middleware/optimistic_lock.py +42 -0
- svc_infra/api/fastapi/middleware/ratelimit.py +125 -28
- svc_infra/api/fastapi/middleware/ratelimit_store.py +43 -10
- svc_infra/api/fastapi/middleware/request_id.py +27 -11
- svc_infra/api/fastapi/middleware/request_size_limit.py +3 -3
- svc_infra/api/fastapi/middleware/timeout.py +177 -0
- svc_infra/api/fastapi/openapi/apply.py +5 -3
- svc_infra/api/fastapi/openapi/conventions.py +9 -2
- svc_infra/api/fastapi/openapi/mutators.py +165 -20
- svc_infra/api/fastapi/openapi/pipeline.py +1 -1
- svc_infra/api/fastapi/openapi/security.py +3 -1
- svc_infra/api/fastapi/ops/add.py +75 -0
- svc_infra/api/fastapi/pagination.py +47 -20
- svc_infra/api/fastapi/routers/__init__.py +43 -15
- svc_infra/api/fastapi/routers/ping.py +1 -0
- svc_infra/api/fastapi/setup.py +188 -57
- svc_infra/api/fastapi/tenancy/add.py +19 -0
- svc_infra/api/fastapi/tenancy/context.py +112 -0
- svc_infra/api/fastapi/versioned.py +101 -0
- svc_infra/app/README.md +5 -5
- svc_infra/app/__init__.py +3 -1
- svc_infra/app/env.py +69 -1
- svc_infra/app/logging/add.py +9 -2
- svc_infra/app/logging/formats.py +12 -5
- svc_infra/billing/__init__.py +23 -0
- svc_infra/billing/async_service.py +147 -0
- svc_infra/billing/jobs.py +241 -0
- svc_infra/billing/models.py +177 -0
- svc_infra/billing/quotas.py +103 -0
- svc_infra/billing/schemas.py +36 -0
- svc_infra/billing/service.py +123 -0
- svc_infra/bundled_docs/README.md +5 -0
- svc_infra/bundled_docs/__init__.py +1 -0
- svc_infra/bundled_docs/getting-started.md +6 -0
- svc_infra/cache/__init__.py +9 -0
- svc_infra/cache/add.py +170 -0
- svc_infra/cache/backend.py +7 -6
- svc_infra/cache/decorators.py +81 -15
- svc_infra/cache/demo.py +2 -2
- svc_infra/cache/keys.py +24 -4
- svc_infra/cache/recache.py +26 -14
- svc_infra/cache/resources.py +14 -5
- svc_infra/cache/tags.py +19 -44
- svc_infra/cache/utils.py +3 -1
- svc_infra/cli/__init__.py +52 -8
- svc_infra/cli/__main__.py +4 -0
- svc_infra/cli/cmds/__init__.py +39 -2
- svc_infra/cli/cmds/db/nosql/mongo/mongo_cmds.py +7 -4
- svc_infra/cli/cmds/db/nosql/mongo/mongo_scaffold_cmds.py +7 -5
- svc_infra/cli/cmds/db/ops_cmds.py +270 -0
- svc_infra/cli/cmds/db/sql/alembic_cmds.py +103 -18
- svc_infra/cli/cmds/db/sql/sql_export_cmds.py +88 -0
- svc_infra/cli/cmds/db/sql/sql_scaffold_cmds.py +3 -3
- svc_infra/cli/cmds/docs/docs_cmds.py +142 -0
- svc_infra/cli/cmds/dx/__init__.py +12 -0
- svc_infra/cli/cmds/dx/dx_cmds.py +116 -0
- svc_infra/cli/cmds/health/__init__.py +179 -0
- svc_infra/cli/cmds/health/health_cmds.py +8 -0
- svc_infra/cli/cmds/help.py +4 -0
- svc_infra/cli/cmds/jobs/__init__.py +1 -0
- svc_infra/cli/cmds/jobs/jobs_cmds.py +47 -0
- svc_infra/cli/cmds/obs/obs_cmds.py +36 -15
- svc_infra/cli/cmds/sdk/__init__.py +0 -0
- svc_infra/cli/cmds/sdk/sdk_cmds.py +112 -0
- svc_infra/cli/foundation/runner.py +6 -2
- svc_infra/data/add.py +61 -0
- svc_infra/data/backup.py +58 -0
- svc_infra/data/erasure.py +45 -0
- svc_infra/data/fixtures.py +42 -0
- svc_infra/data/retention.py +61 -0
- svc_infra/db/__init__.py +15 -0
- svc_infra/db/crud_schema.py +9 -9
- svc_infra/db/inbox.py +67 -0
- svc_infra/db/nosql/__init__.py +3 -0
- svc_infra/db/nosql/core.py +30 -9
- svc_infra/db/nosql/indexes.py +3 -1
- svc_infra/db/nosql/management.py +1 -1
- svc_infra/db/nosql/mongo/README.md +13 -13
- svc_infra/db/nosql/mongo/client.py +19 -2
- svc_infra/db/nosql/mongo/settings.py +6 -2
- svc_infra/db/nosql/repository.py +35 -15
- svc_infra/db/nosql/resource.py +20 -3
- svc_infra/db/nosql/scaffold.py +9 -3
- svc_infra/db/nosql/service.py +3 -1
- svc_infra/db/nosql/types.py +6 -2
- svc_infra/db/ops.py +384 -0
- svc_infra/db/outbox.py +108 -0
- svc_infra/db/sql/apikey.py +37 -9
- svc_infra/db/sql/authref.py +9 -3
- svc_infra/db/sql/constants.py +12 -8
- svc_infra/db/sql/core.py +2 -2
- svc_infra/db/sql/management.py +11 -8
- svc_infra/db/sql/repository.py +99 -26
- svc_infra/db/sql/resource.py +5 -0
- svc_infra/db/sql/scaffold.py +6 -2
- svc_infra/db/sql/service.py +15 -5
- svc_infra/db/sql/templates/models_schemas/auth/models.py.tmpl +7 -56
- svc_infra/db/sql/templates/setup/env_async.py.tmpl +34 -12
- svc_infra/db/sql/templates/setup/env_sync.py.tmpl +29 -7
- svc_infra/db/sql/tenant.py +88 -0
- svc_infra/db/sql/uniq_hooks.py +9 -3
- svc_infra/db/sql/utils.py +138 -51
- svc_infra/db/sql/versioning.py +14 -0
- svc_infra/deploy/__init__.py +538 -0
- svc_infra/documents/__init__.py +100 -0
- svc_infra/documents/add.py +264 -0
- svc_infra/documents/ease.py +233 -0
- svc_infra/documents/models.py +114 -0
- svc_infra/documents/storage.py +264 -0
- svc_infra/dx/add.py +65 -0
- svc_infra/dx/changelog.py +74 -0
- svc_infra/dx/checks.py +68 -0
- svc_infra/exceptions.py +141 -0
- svc_infra/health/__init__.py +864 -0
- svc_infra/http/__init__.py +13 -0
- svc_infra/http/client.py +105 -0
- svc_infra/jobs/builtins/outbox_processor.py +40 -0
- svc_infra/jobs/builtins/webhook_delivery.py +95 -0
- svc_infra/jobs/easy.py +33 -0
- svc_infra/jobs/loader.py +50 -0
- svc_infra/jobs/queue.py +116 -0
- svc_infra/jobs/redis_queue.py +256 -0
- svc_infra/jobs/runner.py +79 -0
- svc_infra/jobs/scheduler.py +53 -0
- svc_infra/jobs/worker.py +40 -0
- svc_infra/loaders/__init__.py +186 -0
- svc_infra/loaders/base.py +142 -0
- svc_infra/loaders/github.py +311 -0
- svc_infra/loaders/models.py +147 -0
- svc_infra/loaders/url.py +235 -0
- svc_infra/logging/__init__.py +374 -0
- svc_infra/mcp/svc_infra_mcp.py +91 -33
- svc_infra/obs/README.md +2 -0
- svc_infra/obs/add.py +65 -9
- svc_infra/obs/cloud_dash.py +2 -1
- svc_infra/obs/grafana/dashboards/http-overview.json +45 -0
- svc_infra/obs/metrics/__init__.py +3 -4
- svc_infra/obs/metrics/asgi.py +13 -7
- svc_infra/obs/metrics/http.py +9 -5
- svc_infra/obs/metrics/sqlalchemy.py +13 -9
- svc_infra/obs/metrics.py +6 -5
- svc_infra/obs/settings.py +6 -2
- svc_infra/security/add.py +217 -0
- svc_infra/security/audit.py +92 -10
- svc_infra/security/audit_service.py +4 -3
- svc_infra/security/headers.py +15 -2
- svc_infra/security/hibp.py +14 -4
- svc_infra/security/jwt_rotation.py +74 -22
- svc_infra/security/lockout.py +11 -5
- svc_infra/security/models.py +54 -12
- svc_infra/security/oauth_models.py +73 -0
- svc_infra/security/org_invites.py +5 -3
- svc_infra/security/passwords.py +3 -1
- svc_infra/security/permissions.py +25 -2
- svc_infra/security/session.py +1 -1
- svc_infra/security/signed_cookies.py +21 -1
- svc_infra/storage/__init__.py +93 -0
- svc_infra/storage/add.py +253 -0
- svc_infra/storage/backends/__init__.py +11 -0
- svc_infra/storage/backends/local.py +339 -0
- svc_infra/storage/backends/memory.py +216 -0
- svc_infra/storage/backends/s3.py +353 -0
- svc_infra/storage/base.py +239 -0
- svc_infra/storage/easy.py +185 -0
- svc_infra/storage/settings.py +195 -0
- svc_infra/testing/__init__.py +685 -0
- svc_infra/utils.py +7 -3
- svc_infra/webhooks/__init__.py +69 -0
- svc_infra/webhooks/add.py +339 -0
- svc_infra/webhooks/encryption.py +115 -0
- svc_infra/webhooks/fastapi.py +39 -0
- svc_infra/webhooks/router.py +55 -0
- svc_infra/webhooks/service.py +70 -0
- svc_infra/webhooks/signing.py +34 -0
- svc_infra/websocket/__init__.py +79 -0
- svc_infra/websocket/add.py +140 -0
- svc_infra/websocket/client.py +282 -0
- svc_infra/websocket/config.py +69 -0
- svc_infra/websocket/easy.py +76 -0
- svc_infra/websocket/exceptions.py +61 -0
- svc_infra/websocket/manager.py +344 -0
- svc_infra/websocket/models.py +49 -0
- svc_infra-0.1.706.dist-info/LICENSE +21 -0
- svc_infra-0.1.706.dist-info/METADATA +356 -0
- svc_infra-0.1.706.dist-info/RECORD +357 -0
- svc_infra-0.1.595.dist-info/METADATA +0 -80
- svc_infra-0.1.595.dist-info/RECORD +0 -253
- {svc_infra-0.1.595.dist-info → svc_infra-0.1.706.dist-info}/WHEEL +0 -0
- {svc_infra-0.1.595.dist-info → svc_infra-0.1.706.dist-info}/entry_points.txt +0 -0
svc_infra/db/ops.py
ADDED
@@ -0,0 +1,384 @@
"""Database operations utilities for one-off administrative tasks.

This module provides synchronous database utilities for operations that
don't fit the normal async SQLAlchemy workflow, such as:
- Waiting for database readiness at startup
- Executing maintenance SQL
- Dropping tables with lock handling
- Terminating blocking queries

These utilities use psycopg2 directly for maximum reliability in
edge cases where the ORM might not be available or appropriate.

Example:
    >>> from svc_infra.db.ops import wait_for_database, run_sync_sql
    >>>
    >>> # Wait for database before app starts
    >>> wait_for_database(timeout=30)
    >>>
    >>> # Run maintenance query
    >>> run_sync_sql("VACUUM ANALYZE my_table")
"""

from __future__ import annotations

import logging
import sys
import time
from typing import Any, Optional, Sequence, cast

from .sql.utils import get_database_url_from_env

logger = logging.getLogger(__name__)

# Timeout for individual database operations (seconds)
DEFAULT_STATEMENT_TIMEOUT = 30

# Default wait-for-database settings
DEFAULT_WAIT_TIMEOUT = 30
DEFAULT_WAIT_INTERVAL = 1.0


def _flush() -> None:
    """Force flush stdout/stderr for containerized log visibility."""
    sys.stdout.flush()
    sys.stderr.flush()


def _get_connection(url: Optional[str] = None, connect_timeout: int = 10) -> Any:
    """
    Get a psycopg2 connection.

    Args:
        url: Database URL. If None, resolved from environment.
        connect_timeout: Connection timeout in seconds.

    Returns:
        psycopg2 connection object

    Raises:
        ImportError: If psycopg2 is not installed
        RuntimeError: If no database URL is available
    """
    try:
        import psycopg2
    except ImportError as e:
        raise ImportError(
            "psycopg2 is required for db.ops utilities. "
            "Install with: pip install psycopg2-binary"
        ) from e

    if url is None:
        url = get_database_url_from_env(required=True)

    # Add connect_timeout to connection options
    return psycopg2.connect(url, connect_timeout=connect_timeout)


def wait_for_database(
    url: Optional[str] = None,
    timeout: float = DEFAULT_WAIT_TIMEOUT,
    interval: float = DEFAULT_WAIT_INTERVAL,
    verbose: bool = True,
) -> bool:
    """
    Wait for database to be ready, with retries.

    Useful for container startup scripts where the database may not
    be immediately available.

    Args:
        url: Database URL. If None, resolved from environment.
        timeout: Maximum time to wait in seconds (default: 30)
        interval: Time between retry attempts in seconds (default: 1.0)
        verbose: If True, log progress messages

    Returns:
        True if database is ready, False if timeout reached

    Example:
        >>> # In container startup script
        >>> if not wait_for_database(timeout=60):
        ...     sys.exit(1)
        >>> # Database is ready, continue with app startup
    """
    if url is None:
        url = get_database_url_from_env(required=True)

    start = time.monotonic()
    attempt = 0

    while True:
        attempt += 1
        elapsed = time.monotonic() - start

        if elapsed >= timeout:
            if verbose:
                logger.error(
                    f"Database not ready after {timeout}s ({attempt} attempts)"
                )
            _flush()
            return False

        try:
            conn = _get_connection(url, connect_timeout=min(5, int(timeout - elapsed)))
            conn.close()
            if verbose:
                logger.info(f"Database ready after {elapsed:.1f}s ({attempt} attempts)")
            _flush()
            return True
        except Exception as e:
            if verbose:
                remaining = timeout - elapsed
                logger.debug(
                    f"Database not ready ({e}), retrying... ({remaining:.0f}s remaining)"
                )
            _flush()
            time.sleep(interval)


def run_sync_sql(
    sql: str,
    params: Optional[Sequence[Any]] = None,
    url: Optional[str] = None,
    timeout: int = DEFAULT_STATEMENT_TIMEOUT,
    fetch: bool = False,
) -> Optional[list[tuple[Any, ...]]]:
    """
    Execute SQL synchronously with a statement timeout.

    This is useful for one-off administrative queries that don't fit
    the normal async SQLAlchemy workflow.

    Args:
        sql: SQL statement to execute
        params: Optional parameters for parameterized queries
        url: Database URL. If None, resolved from environment.
        timeout: Statement timeout in seconds (default: 30)
        fetch: If True, return fetched rows; if False, return None

    Returns:
        List of tuples if fetch=True, otherwise None

    Raises:
        psycopg2.Error: On database errors
        TimeoutError: If statement exceeds timeout

    Example:
        >>> # Run a maintenance query
        >>> run_sync_sql("VACUUM ANALYZE users")
        >>>
        >>> # Fetch data with timeout
        >>> rows = run_sync_sql(
        ...     "SELECT id, name FROM users WHERE active = %s",
        ...     params=(True,),
        ...     fetch=True,
        ...     timeout=10
        ... )
    """
    conn = _get_connection(url)
    try:
        with conn.cursor() as cur:
            # Set statement timeout (PostgreSQL-specific)
            cur.execute(f"SET statement_timeout = '{timeout}s'")

            if params:
                cur.execute(sql, params)
            else:
                cur.execute(sql)

            if fetch:
                return cast(list[tuple[Any, ...]], cur.fetchall())

        conn.commit()
        return None
    finally:
        conn.close()


def kill_blocking_queries(
    table_name: str,
    url: Optional[str] = None,
    timeout: int = DEFAULT_STATEMENT_TIMEOUT,
    dry_run: bool = False,
) -> list[dict[str, Any]]:
    """
    Terminate queries blocking operations on a specific table.

    This is useful before DROP TABLE or ALTER TABLE operations that
    might be blocked by long-running queries or idle transactions.

    Args:
        table_name: Name of the table (can include schema as 'schema.table')
        url: Database URL. If None, resolved from environment.
        timeout: Statement timeout in seconds (default: 30)
        dry_run: If True, only report blocking queries without terminating

    Returns:
        List of dicts with info about terminated (or found) queries:
        [{"pid": 123, "query": "SELECT...", "state": "active", "terminated": True}]

    Example:
        >>> # Check what would be terminated
        >>> blocking = kill_blocking_queries("embeddings", dry_run=True)
        >>> print(f"Found {len(blocking)} blocking queries")
        >>>
        >>> # Actually terminate them
        >>> kill_blocking_queries("embeddings")
    """
    # Query to find blocking queries on a table
    find_blocking_sql = """
        SELECT pid, state, query, age(clock_timestamp(), query_start) as duration
        FROM pg_stat_activity
        WHERE pid != pg_backend_pid()
          AND state != 'idle'
          AND (
              query ILIKE %s
              OR query ILIKE %s
              OR query ILIKE %s
          )
        ORDER BY query_start;
    """

    # Patterns to match queries involving the table
    patterns = (
        f"%{table_name}%",
        f"%{table_name.split('.')[-1]}%",  # Just table name without schema
        f"%{table_name.replace('.', '%')}%",  # Handle schema.table pattern
    )

    conn = _get_connection(url)
    terminated: list[dict[str, Any]] = []

    try:
        with conn.cursor() as cur:
            cur.execute(f"SET statement_timeout = '{timeout}s'")
            cur.execute(find_blocking_sql, patterns)
            rows = cur.fetchall()

            for pid, state, query, duration in rows:
                info = {
                    "pid": pid,
                    "state": state,
                    "query": query[:200] + "..." if len(query) > 200 else query,
                    "duration": str(duration),
                    "terminated": False,
                }

                if not dry_run:
                    try:
                        cur.execute("SELECT pg_terminate_backend(%s)", (pid,))
                        info["terminated"] = True
                        logger.info(f"Terminated query PID {pid}: {query[:100]}...")
                    except Exception as e:
                        logger.warning(f"Failed to terminate PID {pid}: {e}")
                        info["error"] = str(e)

                terminated.append(info)

        conn.commit()
    finally:
        conn.close()

    _flush()
    return terminated


def drop_table_safe(
    table_name: str,
    url: Optional[str] = None,
    timeout: int = DEFAULT_STATEMENT_TIMEOUT,
    kill_blocking: bool = True,
    if_exists: bool = True,
    cascade: bool = False,
) -> bool:
    """
    Drop a table safely with lock handling.

    Handles common issues with DROP TABLE:
    - Terminates blocking queries first (optional)
    - Uses statement timeout to avoid hanging
    - Handles 'table does not exist' gracefully

    Args:
        table_name: Name of table to drop (can include schema)
        url: Database URL. If None, resolved from environment.
        timeout: Statement timeout in seconds (default: 30)
        kill_blocking: If True, terminate blocking queries first (default: True)
        if_exists: If True, don't error if table doesn't exist (default: True)
        cascade: If True, drop dependent objects (default: False)

    Returns:
        True if table was dropped (or didn't exist), False on error

    Example:
        >>> # Drop table, killing any blocking queries first
        >>> drop_table_safe("embeddings", cascade=True)
        True
        >>>
        >>> # Safe to call even if table doesn't exist
        >>> drop_table_safe("nonexistent_table")
        True
    """
    if url is None:
        url = get_database_url_from_env(required=True)

    # Kill blocking queries first if requested
    if kill_blocking:
        blocked = kill_blocking_queries(table_name, url=url, timeout=timeout)
        if blocked:
            logger.info(f"Terminated {len(blocked)} blocking queries before DROP")
            # Brief pause to let connections clean up
            time.sleep(0.5)

    # Build DROP statement
    drop_sql = "DROP TABLE"
    if if_exists:
        drop_sql += " IF EXISTS"
    drop_sql += f" {table_name}"
    if cascade:
        drop_sql += " CASCADE"

    try:
        run_sync_sql(drop_sql, url=url, timeout=timeout)
        logger.info(f"Dropped table: {table_name}")
        _flush()
        return True
    except Exception as e:
        logger.error(f"Failed to drop table {table_name}: {e}")
        _flush()
        return False


def get_database_url(
    required: bool = True,
    normalize: bool = True,
) -> Optional[str]:
    """
    Convenience wrapper for get_database_url_from_env().

    This is the recommended way to get the database URL, as it
    handles all common environment variable names and normalizations.

    Args:
        required: If True, raise RuntimeError when no URL is found
        normalize: If True, convert postgres:// to postgresql://

    Returns:
        Database URL string, or None if not found and not required

    Example:
        >>> url = get_database_url()
        >>> print(url)
        'postgresql://user:pass@host:5432/db'
    """
    return get_database_url_from_env(required=required, normalize=normalize)


__all__ = [
    "wait_for_database",
    "run_sync_sql",
    "kill_blocking_queries",
    "drop_table_safe",
    "get_database_url",
]
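Taken together, the new db.ops helpers are aimed at container entrypoints: block on readiness, then run bounded maintenance SQL. A minimal sketch combining them in the way the module's own docstrings suggest (the script and the `users` table are illustrative, not from the package):

```python
# entrypoint.py -- illustrative startup script; the table name is hypothetical.
import sys

from svc_infra.db.ops import run_sync_sql, wait_for_database


def main() -> None:
    # Block until Postgres accepts connections, or give up after 60s
    # (wait_for_database logs each retry and returns False on timeout).
    if not wait_for_database(timeout=60, interval=2.0):
        sys.exit(1)

    # One-off maintenance before serving traffic; the statement_timeout
    # that run_sync_sql sets bounds how long this may run.
    run_sync_sql("VACUUM ANALYZE users", timeout=120)


if __name__ == "__main__":
    main()
```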
svc_infra/db/outbox.py
ADDED
@@ -0,0 +1,108 @@
from __future__ import annotations

from dataclasses import dataclass, field
from datetime import datetime, timezone
from typing import Any, Dict, Iterable, List, Optional, Protocol


@dataclass
class OutboxMessage:
    id: int
    topic: str
    payload: Dict[str, Any]
    created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
    attempts: int = 0
    processed_at: Optional[datetime] = None


class OutboxStore(Protocol):
    def enqueue(self, topic: str, payload: Dict[str, Any]) -> OutboxMessage:
        pass

    def fetch_next(
        self, *, topics: Optional[Iterable[str]] = None
    ) -> Optional[OutboxMessage]:
        """Return the next undispatched, unprocessed message (FIFO per-topic), or None.

        Notes:
            - Messages with attempts > 0 are considered "dispatched" to the job queue and won't be re-enqueued.
            - Delivery retries are handled by the job queue worker, not by re-reading the outbox.
        """
        pass

    def mark_processed(self, msg_id: int) -> None:
        pass

    def mark_failed(self, msg_id: int) -> None:
        pass


class InMemoryOutboxStore:
    """Simple in-memory outbox for tests and local runs."""

    def __init__(self):
        self._seq = 0
        self._messages: List[OutboxMessage] = []

    def enqueue(self, topic: str, payload: Dict[str, Any]) -> OutboxMessage:
        self._seq += 1
        msg = OutboxMessage(id=self._seq, topic=topic, payload=dict(payload))
        self._messages.append(msg)
        return msg

    def fetch_next(
        self, *, topics: Optional[Iterable[str]] = None
    ) -> Optional[OutboxMessage]:
        allowed = set(topics) if topics else None
        for msg in self._messages:
            if msg.processed_at is not None:
                continue
            # skip already dispatched messages (attempts>0)
            if msg.attempts > 0:
                continue
            if allowed is not None and msg.topic not in allowed:
                continue
            return msg
        return None

    def mark_processed(self, msg_id: int) -> None:
        for msg in self._messages:
            if msg.id == msg_id:
                msg.processed_at = datetime.now(timezone.utc)
                return

    def mark_failed(self, msg_id: int) -> None:
        for msg in self._messages:
            if msg.id == msg_id:
                msg.attempts += 1
                return


class SqlOutboxStore:
    """Skeleton for a SQL-backed outbox store.

    Implementations should:
    - INSERT on enqueue
    - SELECT FOR UPDATE SKIP LOCKED (or equivalent) to fetch next
    - UPDATE processed_at (and attempts on failure)
    """

    def __init__(self, session_factory):
        self._session_factory = session_factory

    # Placeholders to outline the API; not implemented here.
    def enqueue(
        self, topic: str, payload: Dict[str, Any]
    ) -> OutboxMessage:  # pragma: no cover - skeleton
        raise NotImplementedError

    def fetch_next(
        self, *, topics: Optional[Iterable[str]] = None
    ) -> Optional[OutboxMessage]:  # pragma: no cover - skeleton
        raise NotImplementedError

    def mark_processed(self, msg_id: int) -> None:  # pragma: no cover - skeleton
        raise NotImplementedError

    def mark_failed(self, msg_id: int) -> None:  # pragma: no cover - skeleton
        raise NotImplementedError
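The SqlOutboxStore docstring prescribes `SELECT ... FOR UPDATE SKIP LOCKED` for fetching, but ships only the skeleton. A hedged sketch of what that query could look like on PostgreSQL; the `outbox` table and its columns are assumptions mirroring `OutboxMessage`, not a schema the package defines:

```python
from typing import Optional

from sqlalchemy import text
from sqlalchemy.orm import Session

from svc_infra.db.outbox import OutboxMessage

# Assumed schema: an "outbox" table with columns matching OutboxMessage.
FETCH_NEXT_SQL = text(
    """
    SELECT id, topic, payload, created_at, attempts, processed_at
    FROM outbox
    WHERE processed_at IS NULL AND attempts = 0
    ORDER BY id
    FOR UPDATE SKIP LOCKED
    LIMIT 1
    """
)


def fetch_next_sketch(session: Session) -> Optional[OutboxMessage]:
    # SKIP LOCKED lets concurrent workers claim different rows without
    # blocking on each other's row locks; the row stays locked until commit.
    row = session.execute(FETCH_NEXT_SQL).first()
    if row is None:
        return None
    return OutboxMessage(
        id=row.id,
        topic=row.topic,
        payload=row.payload,
        created_at=row.created_at,
        attempts=row.attempts,
        processed_at=row.processed_at,
    )
```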
svc_infra/db/sql/apikey.py
CHANGED
@@ -7,18 +7,36 @@ import uuid
 from datetime import datetime, timezone
 from typing import Optional, Type

-from sqlalchemy import
+from sqlalchemy import (
+    JSON,
+    Boolean,
+    DateTime,
+    ForeignKey,
+    Index,
+    String,
+    UniqueConstraint,
+    text,
+)
 from sqlalchemy.ext.mutable import MutableDict, MutableList
 from sqlalchemy.orm import Mapped, declared_attr, mapped_column, relationship

+from svc_infra.app.env import require_secret
 from svc_infra.db.sql.base import ModelBase
 from svc_infra.db.sql.types import GUID

-
+
+def _get_apikey_secret() -> str:
+    """Get APIKEY_HASH_SECRET, requiring it in production."""
+    return require_secret(
+        os.getenv("APIKEY_HASH_SECRET"),
+        "APIKEY_HASH_SECRET",
+        dev_default="dev-only-apikey-hmac-secret-not-for-production",
+    )


 def _hmac_sha256(s: str) -> str:
-
+    secret = _get_apikey_secret()
+    return hmac.new(secret.encode(), s.encode(), hashlib.sha256).hexdigest()


 def _now() -> datetime:
@@ -33,7 +51,9 @@ _ApiKeyModel: Optional[type] = None
 def get_apikey_model() -> type:
     """Return the bound ApiKey model (or raise if not enabled)."""
     if _ApiKeyModel is None:
-        raise RuntimeError(
+        raise RuntimeError(
+            "ApiKey model is not enabled. Call bind_apikey_model(...) first."
+        )
     return _ApiKeyModel


@@ -43,10 +63,12 @@ def bind_apikey_model(user_model: Type, *, table_name: str = "api_keys") -> type
     Call this once during app boot (e.g., inside add_auth_users when enable_api_keys=True).
     """

-    class ApiKey(ModelBase):
+    class ApiKey(ModelBase):
         __tablename__ = table_name

-        id: Mapped[uuid.UUID] = mapped_column(
+        id: Mapped[uuid.UUID] = mapped_column(
+            GUID(), primary_key=True, default=uuid.uuid4
+        )

         @declared_attr
         def user_id(cls) -> Mapped[uuid.UUID | None]:  # noqa: N805
@@ -67,14 +89,18 @@ def bind_apikey_model(user_model: Type, *, table_name: str = "api_keys") -> type
         key_prefix: Mapped[str] = mapped_column(String(12), index=True, nullable=False)
         key_hash: Mapped[str] = mapped_column(String(64), nullable=False)  # hex sha256

-        scopes: Mapped[list[str]] = mapped_column(
+        scopes: Mapped[list[str]] = mapped_column(
+            MutableList.as_mutable(JSON), default=list
+        )
         active: Mapped[bool] = mapped_column(Boolean, default=True, nullable=False)
         expires_at: Mapped[datetime | None] = mapped_column(DateTime(timezone=True))
         last_used_at: Mapped[datetime | None] = mapped_column(DateTime(timezone=True))
         meta: Mapped[dict] = mapped_column(MutableDict.as_mutable(JSON), default=dict)

         created_at = mapped_column(
-            DateTime(timezone=True),
+            DateTime(timezone=True),
+            server_default=text("CURRENT_TIMESTAMP"),
+            nullable=False,
         )
         updated_at = mapped_column(
             DateTime(timezone=True),
@@ -99,7 +125,9 @@ def bind_apikey_model(user_model: Type, *, table_name: str = "api_keys") -> type
         import secrets

         prefix = secrets.token_urlsafe(6).replace("-", "").replace("_", "")[:8]
-        rand =
+        rand = (
+            base64.urlsafe_b64encode(secrets.token_bytes(24)).decode().rstrip("=")
+        )
         plaintext = f"ak_{prefix}_{rand}"
         return plaintext, prefix, _hmac_sha256(plaintext)
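The diff above covers issuance: a key of the form `ak_<prefix>_<rand>` whose HMAC is stored in `key_hash`, keyed by `APIKEY_HASH_SECRET`. Verification is not shown in this hunk; a sketch of the obvious counterpart, where `find_by_prefix` is a hypothetical lookup helper and only `_hmac_sha256`, `key_prefix`, `key_hash`, and `active` come from the diff:

```python
import hmac


def verify_api_key_sketch(presented: str, find_by_prefix) -> bool:
    # Expected shape: "ak_<prefix>_<rand>".
    parts = presented.split("_", 2)
    if len(parts) != 3 or parts[0] != "ak":
        return False
    # Hypothetical helper, e.g. SELECT ... WHERE key_prefix = :prefix.
    candidate = find_by_prefix(parts[1])
    if candidate is None or not candidate.active:
        return False
    # Recompute the HMAC of the full plaintext and compare it to the
    # stored key_hash in constant time.
    return hmac.compare_digest(candidate.key_hash, _hmac_sha256(presented))
```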
svc_infra/db/sql/authref.py
CHANGED
@@ -22,11 +22,15 @@ def _find_auth_mapper() -> Optional[Tuple[str, TypeEngine, str]]:
             table = mapper.local_table or getattr(cls, "__table__", None)
             if table is None:
                 continue
-
+            table_name = getattr(table, "name", None)
+            if not isinstance(table_name, str) or not table_name:
+                continue
+            # SQLAlchemy's primary_key is iterable; don't rely on .columns typing.
+            pk_cols = list(table.primary_key)
             if len(pk_cols) != 1:
                 continue  # require single-column PK
             pk_col = pk_cols[0]
-            return (
+            return (table_name, pk_col.type, pk_col.name)
         except Exception:
             pass
     return None
@@ -58,4 +62,6 @@ def user_fk_constraint(
     Returns a table-level ForeignKeyConstraint([...], [<auth_table>.<pk>]) for the given column.
     """
     table, _pk_type, pk_name = resolve_auth_table_pk()
-    return ForeignKeyConstraint(
+    return ForeignKeyConstraint(
+        [column_name], [f"{table}.{pk_name}"], ondelete=ondelete
+    )
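For context, `user_fk_constraint` is the kind of helper that drops into a model's `__table_args__`. A hypothetical usage sketch; the model, column names, and GUID column type are illustrative, and the signature is assumed from the hunk above (`column_name` plus an `ondelete` keyword):

```python
import uuid

from sqlalchemy.orm import Mapped, mapped_column

from svc_infra.db.sql.authref import user_fk_constraint
from svc_infra.db.sql.base import ModelBase
from svc_infra.db.sql.types import GUID


class Document(ModelBase):  # illustrative model
    __tablename__ = "documents"
    # Resolves the auth table/PK at definition time and emits
    # FOREIGN KEY (owner_id) REFERENCES <auth_table>(<pk>) ON DELETE CASCADE.
    __table_args__ = (user_fk_constraint("owner_id", ondelete="CASCADE"),)

    id: Mapped[uuid.UUID] = mapped_column(GUID(), primary_key=True, default=uuid.uuid4)
    owner_id: Mapped[uuid.UUID | None] = mapped_column(GUID())
```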
svc_infra/db/sql/constants.py
CHANGED
@@ -4,9 +4,13 @@ import re
 from typing import Sequence

 # Environment variable names to look up for DB URL
+# Order matters: svc-infra canonical names first, then common PaaS names
 DEFAULT_DB_ENV_VARS: Sequence[str] = (
     "SQL_URL",
     "DB_URL",
+    "DATABASE_URL",  # Heroku, Railway (public)
+    "DATABASE_URL_PRIVATE",  # Railway (private networking)
+    "PRIVATE_SQL_URL",  # Legacy svc-infra naming
 )

 # Regex used to detect async drivers from URL drivername
@@ -18,16 +22,16 @@ try:
     import importlib.resources as pkg

     _tmpl_pkg = pkg.files("svc_infra.db.sql.templates.setup")
-    ALEMBIC_INI_TEMPLATE = _tmpl_pkg.joinpath("alembic.ini.tmpl").read_text(
-
-except Exception:
-    # Fallbacks (should not normally happen). Provide minimal safe defaults.
-    ALEMBIC_INI_TEMPLATE = (
-        """[alembic]\nscript_location = {script_location}\nsqlalchemy.url = {sqlalchemy_url}\n"""
+    ALEMBIC_INI_TEMPLATE = _tmpl_pkg.joinpath("alembic.ini.tmpl").read_text(
+        encoding="utf-8"
     )
-
-
+    ALEMBIC_SCRIPT_TEMPLATE = _tmpl_pkg.joinpath("script.py.mako.tmpl").read_text(
+        encoding="utf-8"
     )
+except Exception:
+    # Fallbacks (should not normally happen). Provide minimal safe defaults.
+    ALEMBIC_INI_TEMPLATE = """[alembic]\nscript_location = {script_location}\nsqlalchemy.url = {sqlalchemy_url}\n"""
+    ALEMBIC_INI_TEMPLATE = """[alembic]\nscript_location = {script_location}\nsqlalchemy.url = {sqlalchemy_url}\n"""
     ALEMBIC_SCRIPT_TEMPLATE = '"""${message}"""\nfrom alembic import op\nimport sqlalchemy as sa\n\nrevision = ${repr(up_revision)}\ndown_revision = ${repr(down_revision)}\nbranch_labels = ${repr(branch_labels)}\ndepends_on = ${repr(depends_on)}\n\ndef upgrade():\n ${upgrades if upgrades else "pass"}\n\n\ndef downgrade():\n ${downgrades if downgrades else "pass"}\n'
 __all__ = [
     "DEFAULT_DB_ENV_VARS",
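The comment added to `DEFAULT_DB_ENV_VARS` says order matters. A minimal sketch of first-match-wins resolution over that tuple; the real logic lives in `get_database_url_from_env` in `svc_infra.db.sql.utils` and may differ, for example in how it normalizes URL schemes:

```python
import os
from typing import Optional, Sequence

from svc_infra.db.sql.constants import DEFAULT_DB_ENV_VARS


def resolve_db_url_sketch(env_vars: Sequence[str] = DEFAULT_DB_ENV_VARS) -> Optional[str]:
    for name in env_vars:
        value = os.getenv(name)
        if value:
            # First hit wins: SQL_URL beats DB_URL beats DATABASE_URL, and so on.
            # Normalize the legacy postgres:// scheme for SQLAlchemy.
            return value.replace("postgres://", "postgresql://", 1)
    return None
```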
svc_infra/db/sql/core.py
CHANGED
@@ -140,7 +140,7 @@ def revision(
         cfg,
         message=message,
         autogenerate=autogenerate,
-        head=head,
+        head=head or "head",
         branch_label=branch_label,
         version_path=version_path,
         sql=sql,
@@ -319,7 +319,7 @@ def setup_and_migrate(
     """
     resolved_url = database_url or get_database_url_from_env(required=True)
     root = prepare_env()
-    if create_db_if_missing:
+    if create_db_if_missing and resolved_url:
         ensure_database_exists(resolved_url)

     mig_dir = init_alembic(