svc-infra 0.1.706__py3-none-any.whl → 1.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- svc_infra/apf_payments/models.py +47 -108
- svc_infra/apf_payments/provider/__init__.py +2 -2
- svc_infra/apf_payments/provider/aiydan.py +42 -100
- svc_infra/apf_payments/provider/base.py +10 -26
- svc_infra/apf_payments/provider/registry.py +3 -5
- svc_infra/apf_payments/provider/stripe.py +63 -135
- svc_infra/apf_payments/schemas.py +82 -90
- svc_infra/apf_payments/service.py +40 -86
- svc_infra/apf_payments/settings.py +10 -13
- svc_infra/api/__init__.py +13 -13
- svc_infra/api/fastapi/__init__.py +19 -0
- svc_infra/api/fastapi/admin/add.py +13 -18
- svc_infra/api/fastapi/apf_payments/router.py +47 -84
- svc_infra/api/fastapi/apf_payments/setup.py +7 -13
- svc_infra/api/fastapi/auth/__init__.py +1 -1
- svc_infra/api/fastapi/auth/_cookies.py +3 -9
- svc_infra/api/fastapi/auth/add.py +4 -8
- svc_infra/api/fastapi/auth/gaurd.py +9 -26
- svc_infra/api/fastapi/auth/mfa/models.py +4 -7
- svc_infra/api/fastapi/auth/mfa/pre_auth.py +3 -3
- svc_infra/api/fastapi/auth/mfa/router.py +9 -15
- svc_infra/api/fastapi/auth/mfa/security.py +3 -5
- svc_infra/api/fastapi/auth/mfa/utils.py +3 -2
- svc_infra/api/fastapi/auth/mfa/verify.py +2 -9
- svc_infra/api/fastapi/auth/providers.py +4 -6
- svc_infra/api/fastapi/auth/routers/apikey_router.py +16 -18
- svc_infra/api/fastapi/auth/routers/oauth_router.py +37 -85
- svc_infra/api/fastapi/auth/routers/session_router.py +3 -6
- svc_infra/api/fastapi/auth/security.py +17 -28
- svc_infra/api/fastapi/auth/sender.py +1 -3
- svc_infra/api/fastapi/auth/settings.py +18 -19
- svc_infra/api/fastapi/auth/state.py +6 -7
- svc_infra/api/fastapi/auth/ws_security.py +2 -2
- svc_infra/api/fastapi/billing/router.py +6 -8
- svc_infra/api/fastapi/db/http.py +10 -11
- svc_infra/api/fastapi/db/nosql/mongo/add.py +5 -15
- svc_infra/api/fastapi/db/nosql/mongo/crud_router.py +14 -15
- svc_infra/api/fastapi/db/sql/add.py +6 -14
- svc_infra/api/fastapi/db/sql/crud_router.py +27 -40
- svc_infra/api/fastapi/db/sql/health.py +1 -3
- svc_infra/api/fastapi/db/sql/session.py +4 -5
- svc_infra/api/fastapi/db/sql/users.py +8 -11
- svc_infra/api/fastapi/dependencies/ratelimit.py +4 -6
- svc_infra/api/fastapi/docs/add.py +13 -23
- svc_infra/api/fastapi/docs/landing.py +6 -8
- svc_infra/api/fastapi/docs/scoped.py +34 -42
- svc_infra/api/fastapi/dual/dualize.py +1 -1
- svc_infra/api/fastapi/dual/protected.py +12 -21
- svc_infra/api/fastapi/dual/router.py +14 -31
- svc_infra/api/fastapi/ease.py +57 -13
- svc_infra/api/fastapi/http/conditional.py +3 -5
- svc_infra/api/fastapi/middleware/errors/catchall.py +2 -6
- svc_infra/api/fastapi/middleware/errors/exceptions.py +1 -4
- svc_infra/api/fastapi/middleware/errors/handlers.py +12 -18
- svc_infra/api/fastapi/middleware/graceful_shutdown.py +4 -13
- svc_infra/api/fastapi/middleware/idempotency.py +11 -16
- svc_infra/api/fastapi/middleware/idempotency_store.py +14 -14
- svc_infra/api/fastapi/middleware/optimistic_lock.py +5 -8
- svc_infra/api/fastapi/middleware/ratelimit.py +8 -8
- svc_infra/api/fastapi/middleware/ratelimit_store.py +7 -8
- svc_infra/api/fastapi/middleware/request_id.py +1 -3
- svc_infra/api/fastapi/middleware/timeout.py +9 -10
- svc_infra/api/fastapi/object_router.py +1060 -0
- svc_infra/api/fastapi/openapi/apply.py +5 -6
- svc_infra/api/fastapi/openapi/conventions.py +4 -4
- svc_infra/api/fastapi/openapi/mutators.py +13 -31
- svc_infra/api/fastapi/openapi/pipeline.py +2 -2
- svc_infra/api/fastapi/openapi/responses.py +4 -6
- svc_infra/api/fastapi/openapi/security.py +1 -3
- svc_infra/api/fastapi/ops/add.py +7 -9
- svc_infra/api/fastapi/pagination.py +25 -37
- svc_infra/api/fastapi/routers/__init__.py +16 -38
- svc_infra/api/fastapi/setup.py +13 -31
- svc_infra/api/fastapi/tenancy/add.py +3 -2
- svc_infra/api/fastapi/tenancy/context.py +8 -7
- svc_infra/api/fastapi/versioned.py +3 -2
- svc_infra/app/env.py +5 -7
- svc_infra/app/logging/add.py +2 -1
- svc_infra/app/logging/filter.py +1 -1
- svc_infra/app/logging/formats.py +3 -2
- svc_infra/app/root.py +3 -3
- svc_infra/billing/__init__.py +19 -2
- svc_infra/billing/async_service.py +27 -7
- svc_infra/billing/jobs.py +23 -33
- svc_infra/billing/models.py +21 -52
- svc_infra/billing/quotas.py +5 -7
- svc_infra/billing/schemas.py +4 -6
- svc_infra/cache/__init__.py +12 -5
- svc_infra/cache/add.py +6 -9
- svc_infra/cache/backend.py +6 -5
- svc_infra/cache/decorators.py +17 -28
- svc_infra/cache/keys.py +2 -2
- svc_infra/cache/recache.py +22 -35
- svc_infra/cache/resources.py +8 -16
- svc_infra/cache/ttl.py +2 -3
- svc_infra/cache/utils.py +5 -6
- svc_infra/cli/__init__.py +4 -12
- svc_infra/cli/cmds/db/nosql/mongo/mongo_cmds.py +11 -10
- svc_infra/cli/cmds/db/nosql/mongo/mongo_scaffold_cmds.py +6 -9
- svc_infra/cli/cmds/db/ops_cmds.py +3 -6
- svc_infra/cli/cmds/db/sql/alembic_cmds.py +24 -41
- svc_infra/cli/cmds/db/sql/sql_export_cmds.py +9 -17
- svc_infra/cli/cmds/db/sql/sql_scaffold_cmds.py +10 -10
- svc_infra/cli/cmds/docs/docs_cmds.py +7 -10
- svc_infra/cli/cmds/dx/dx_cmds.py +5 -11
- svc_infra/cli/cmds/jobs/jobs_cmds.py +2 -7
- svc_infra/cli/cmds/obs/obs_cmds.py +4 -7
- svc_infra/cli/cmds/sdk/sdk_cmds.py +5 -15
- svc_infra/cli/foundation/runner.py +6 -11
- svc_infra/cli/foundation/typer_bootstrap.py +1 -2
- svc_infra/data/__init__.py +83 -0
- svc_infra/data/add.py +5 -5
- svc_infra/data/backup.py +8 -10
- svc_infra/data/erasure.py +3 -2
- svc_infra/data/fixtures.py +3 -3
- svc_infra/data/retention.py +8 -13
- svc_infra/db/crud_schema.py +9 -8
- svc_infra/db/nosql/__init__.py +0 -1
- svc_infra/db/nosql/constants.py +1 -1
- svc_infra/db/nosql/core.py +7 -14
- svc_infra/db/nosql/indexes.py +11 -10
- svc_infra/db/nosql/management.py +3 -3
- svc_infra/db/nosql/mongo/client.py +3 -3
- svc_infra/db/nosql/mongo/settings.py +2 -6
- svc_infra/db/nosql/repository.py +27 -28
- svc_infra/db/nosql/resource.py +15 -20
- svc_infra/db/nosql/scaffold.py +13 -17
- svc_infra/db/nosql/service.py +3 -4
- svc_infra/db/nosql/service_with_hooks.py +4 -3
- svc_infra/db/nosql/types.py +2 -6
- svc_infra/db/nosql/utils.py +4 -4
- svc_infra/db/ops.py +14 -18
- svc_infra/db/outbox.py +15 -18
- svc_infra/db/sql/apikey.py +12 -21
- svc_infra/db/sql/authref.py +3 -7
- svc_infra/db/sql/constants.py +9 -9
- svc_infra/db/sql/core.py +11 -11
- svc_infra/db/sql/management.py +2 -6
- svc_infra/db/sql/repository.py +17 -24
- svc_infra/db/sql/resource.py +14 -13
- svc_infra/db/sql/scaffold.py +13 -17
- svc_infra/db/sql/service.py +7 -16
- svc_infra/db/sql/service_with_hooks.py +4 -3
- svc_infra/db/sql/tenant.py +6 -14
- svc_infra/db/sql/uniq.py +8 -7
- svc_infra/db/sql/uniq_hooks.py +14 -19
- svc_infra/db/sql/utils.py +24 -53
- svc_infra/db/utils.py +3 -3
- svc_infra/deploy/__init__.py +8 -15
- svc_infra/documents/add.py +7 -8
- svc_infra/documents/ease.py +8 -8
- svc_infra/documents/models.py +3 -3
- svc_infra/documents/storage.py +11 -13
- svc_infra/dx/__init__.py +58 -0
- svc_infra/dx/add.py +1 -3
- svc_infra/dx/changelog.py +2 -2
- svc_infra/dx/checks.py +1 -1
- svc_infra/health/__init__.py +15 -16
- svc_infra/http/client.py +10 -14
- svc_infra/jobs/__init__.py +79 -0
- svc_infra/jobs/builtins/outbox_processor.py +3 -5
- svc_infra/jobs/builtins/webhook_delivery.py +1 -3
- svc_infra/jobs/loader.py +4 -5
- svc_infra/jobs/queue.py +14 -24
- svc_infra/jobs/redis_queue.py +20 -34
- svc_infra/jobs/runner.py +7 -11
- svc_infra/jobs/scheduler.py +5 -5
- svc_infra/jobs/worker.py +1 -1
- svc_infra/loaders/base.py +5 -4
- svc_infra/loaders/github.py +1 -3
- svc_infra/loaders/url.py +3 -9
- svc_infra/logging/__init__.py +7 -6
- svc_infra/mcp/__init__.py +82 -0
- svc_infra/mcp/svc_infra_mcp.py +2 -2
- svc_infra/obs/add.py +4 -3
- svc_infra/obs/cloud_dash.py +1 -1
- svc_infra/obs/metrics/__init__.py +3 -3
- svc_infra/obs/metrics/asgi.py +9 -14
- svc_infra/obs/metrics/base.py +13 -13
- svc_infra/obs/metrics/http.py +5 -9
- svc_infra/obs/metrics/sqlalchemy.py +9 -12
- svc_infra/obs/metrics.py +3 -3
- svc_infra/obs/settings.py +2 -6
- svc_infra/resilience/__init__.py +44 -0
- svc_infra/resilience/circuit_breaker.py +328 -0
- svc_infra/resilience/retry.py +289 -0
- svc_infra/security/__init__.py +167 -0
- svc_infra/security/add.py +5 -9
- svc_infra/security/audit.py +14 -17
- svc_infra/security/audit_service.py +9 -9
- svc_infra/security/hibp.py +3 -6
- svc_infra/security/jwt_rotation.py +7 -10
- svc_infra/security/lockout.py +12 -11
- svc_infra/security/models.py +37 -46
- svc_infra/security/oauth_models.py +8 -8
- svc_infra/security/org_invites.py +11 -13
- svc_infra/security/passwords.py +4 -6
- svc_infra/security/permissions.py +8 -7
- svc_infra/security/session.py +6 -7
- svc_infra/security/signed_cookies.py +9 -9
- svc_infra/storage/add.py +5 -8
- svc_infra/storage/backends/local.py +13 -21
- svc_infra/storage/backends/memory.py +4 -7
- svc_infra/storage/backends/s3.py +17 -36
- svc_infra/storage/base.py +2 -2
- svc_infra/storage/easy.py +4 -8
- svc_infra/storage/settings.py +16 -18
- svc_infra/testing/__init__.py +36 -39
- svc_infra/utils.py +169 -8
- svc_infra/webhooks/__init__.py +1 -1
- svc_infra/webhooks/add.py +17 -29
- svc_infra/webhooks/encryption.py +2 -2
- svc_infra/webhooks/fastapi.py +2 -4
- svc_infra/webhooks/router.py +3 -3
- svc_infra/webhooks/service.py +5 -6
- svc_infra/webhooks/signing.py +5 -5
- svc_infra/websocket/add.py +2 -3
- svc_infra/websocket/client.py +3 -2
- svc_infra/websocket/config.py +6 -18
- svc_infra/websocket/manager.py +9 -10
- {svc_infra-0.1.706.dist-info → svc_infra-1.1.0.dist-info}/METADATA +11 -5
- svc_infra-1.1.0.dist-info/RECORD +364 -0
- svc_infra/billing/service.py +0 -123
- svc_infra-0.1.706.dist-info/RECORD +0 -357
- {svc_infra-0.1.706.dist-info → svc_infra-1.1.0.dist-info}/LICENSE +0 -0
- {svc_infra-0.1.706.dist-info → svc_infra-1.1.0.dist-info}/WHEEL +0 -0
- {svc_infra-0.1.706.dist-info → svc_infra-1.1.0.dist-info}/entry_points.txt +0 -0
svc_infra/dx/__init__.py
ADDED
@@ -0,0 +1,58 @@
+"""Developer experience utilities for CI, changelog, and code quality checks.
+
+This module provides utilities to improve developer experience:
+
+- **CI Workflow**: Generate GitHub Actions CI workflow files
+- **Changelog**: Generate release sections from conventional commits
+- **Checks**: OpenAPI schema validation and migration verification
+
+Example:
+    from svc_infra.dx import write_ci_workflow, write_openapi_lint_config
+
+    # Generate CI workflow for a project
+    write_ci_workflow(target_dir="./myproject", python_version="3.12")
+
+    # Generate OpenAPI lint config
+    write_openapi_lint_config(target_dir="./myproject")
+
+    # Validate OpenAPI schema
+    from svc_infra.dx import check_openapi_problem_schema
+
+    check_openapi_problem_schema(path="openapi.json")
+
+    # Generate changelog section
+    from svc_infra.dx import Commit, generate_release_section
+
+    commits = [
+        Commit(sha="abc123", subject="feat: add new feature"),
+        Commit(sha="def456", subject="fix: resolve bug"),
+    ]
+    changelog = generate_release_section(version="1.0.0", commits=commits)
+    print(changelog)
+
+See Also:
+    - CLI commands: svc-infra dx openapi, svc-infra dx changelog
+"""
+
+from __future__ import annotations
+
+# CI workflow generation
+from .add import write_ci_workflow, write_openapi_lint_config
+
+# Changelog generation
+from .changelog import Commit, generate_release_section
+
+# Code quality checks
+from .checks import check_migrations_up_to_date, check_openapi_problem_schema
+
+__all__ = [
+    # CI workflow
+    "write_ci_workflow",
+    "write_openapi_lint_config",
+    # Changelog
+    "Commit",
+    "generate_release_section",
+    # Checks
+    "check_openapi_problem_schema",
+    "check_migrations_up_to_date",
+]
svc_infra/dx/add.py
CHANGED
@@ -43,9 +43,7 @@ jobs:
     return p
 
 
-def write_openapi_lint_config(
-    *, target_dir: str | Path, name: str = ".redocly.yaml"
-) -> Path:
+def write_openapi_lint_config(*, target_dir: str | Path, name: str = ".redocly.yaml") -> Path:
     """Write a minimal OpenAPI lint config placeholder (Redocly)."""
     p = Path(target_dir) / name
     content = """
svc_infra/dx/changelog.py
CHANGED
@@ -1,8 +1,8 @@
 from __future__ import annotations
 
+from collections.abc import Sequence
 from dataclasses import dataclass
 from datetime import date as _date
-from typing import Sequence
 
 
 @dataclass(frozen=True)
@@ -60,7 +60,7 @@ def generate_release_section(
         buckets.setdefault(typ, []).append(_format_item(c))
 
     lines: list[str] = [f"## v{version} - {release_date}", ""]
-    for key, title in _SECTION_ORDER
+    for key, title in [*_SECTION_ORDER, ("other", "Other")]:
         items = buckets.get(key) or []
         if not items:
             continue
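The second hunk appends an ("other", "Other") bucket to the section order, so commit types outside _SECTION_ORDER now get their own heading instead of being skipped. A minimal sketch, assuming the commit classifier files unrecognized types (such as "chore") under the "other" key; the commit values are illustrative:

from svc_infra.dx import Commit, generate_release_section

commits = [
    Commit(sha="abc123", subject="feat: add new feature"),
    Commit(sha="def456", subject="chore: bump dependencies"),  # assumed to land in "other"
]
section = generate_release_section(version="1.1.0", commits=commits)
print(section)  # "## v1.1.0 - <date>", then one heading per non-empty bucket, now including "Other"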
svc_infra/dx/checks.py
CHANGED
svc_infra/health/__init__.py
CHANGED
@@ -37,9 +37,10 @@ from __future__ import annotations
 
 import asyncio
 import time
+from collections.abc import Awaitable, Callable
 from dataclasses import dataclass, field
 from enum import StrEnum
-from typing import Any
+from typing import Any
 
 import httpx
 
@@ -60,8 +61,8 @@ class HealthCheckResult:
     name: str
     status: HealthStatus
     latency_ms: float
-    message:
-    details:
+    message: str | None = None
+    details: dict[str, Any] | None = None
 
     def to_dict(self) -> dict[str, Any]:
         """Convert to dictionary for JSON serialization."""
@@ -193,7 +194,7 @@ class HealthRegistry:
             # Update latency from our timing
            result.latency_ms = (time.perf_counter() - start) * 1000
             return result
-        except
+        except TimeoutError:
             return HealthCheckResult(
                 name=name,
                 status=HealthStatus.UNHEALTHY,
@@ -208,7 +209,7 @@ class HealthRegistry:
                 message=str(e),
             )
 
-    async def check_all(self) ->
+    async def check_all(self) -> AggregatedHealthResult:
         """
         Run all registered health checks concurrently.
 
@@ -261,7 +262,7 @@ class HealthRegistry:
         *,
         timeout: float = 60.0,
         interval: float = 2.0,
-        check_names:
+        check_names: list[str] | None = None,
     ) -> bool:
         """
         Wait until all (or specified) critical checks pass.
@@ -321,7 +322,7 @@ class AggregatedHealthResult:
 
     status: HealthStatus
     checks: list[HealthCheckResult] = field(default_factory=list)
-    message:
+    message: str | None = None
 
     def to_dict(self) -> dict[str, Any]:
         """Convert to dictionary for JSON serialization."""
@@ -339,7 +340,7 @@ class AggregatedHealthResult:
 # =============================================================================
 
 
-def check_database(url:
+def check_database(url: str | None) -> HealthCheckFn:
     """
     Create a health check for a PostgreSQL database.
 
@@ -391,7 +392,7 @@ def check_database(url: Optional[str]) -> HealthCheckFn:
                 status=HealthStatus.HEALTHY,
                 latency_ms=(time.perf_counter() - start) * 1000,
             )
-        except
+        except TimeoutError:
             return HealthCheckResult(
                 name="database",
                 status=HealthStatus.UNHEALTHY,
@@ -417,7 +418,7 @@ def check_database(url: Optional[str]) -> HealthCheckFn:
     return _check
 
 
-def check_redis(url:
+def check_redis(url: str | None) -> HealthCheckFn:
     """
     Create a health check for Redis.
 
@@ -465,7 +466,7 @@ def check_redis(url: Optional[str]) -> HealthCheckFn:
                 )
             finally:
                 await client.aclose()
-        except
+        except TimeoutError:
             return HealthCheckResult(
                 name="redis",
                 status=HealthStatus.UNHEALTHY,
@@ -496,7 +497,7 @@ def check_url(
     method: str = "GET",
     expected_status: int = 200,
     timeout: float = 5.0,
-    headers:
+    headers: dict[str, str] | None = None,
 ) -> HealthCheckFn:
     """
     Create a health check for an HTTP endpoint.
@@ -622,7 +623,7 @@ def check_tcp(
                 status=HealthStatus.HEALTHY,
                 latency_ms=(time.perf_counter() - start) * 1000,
             )
-        except
+        except TimeoutError:
             return HealthCheckResult(
                 name=name,
                 status=HealthStatus.UNHEALTHY,
@@ -795,9 +796,7 @@ def add_startup_probe(
         else:
             # Log which checks failed
             result = await registry.check_all()
-            failed = [
-                c.name for c in result.checks if c.status == HealthStatus.UNHEALTHY
-            ]
+            failed = [c.name for c in result.checks if c.status == HealthStatus.UNHEALTHY]
             error_msg = f"Dependencies not ready after {timeout}s: {failed}"
             logger.error(error_msg)
             raise RuntimeError(error_msg)
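Across these hunks the optional fields move to modern unions (str | None, dict[str, Any] | None) and the timeout handlers catch the builtin TimeoutError. A custom check is just an async callable returning HealthCheckResult; a minimal sketch using only names visible in this diff (the check name and threshold are illustrative, and how the check gets registered is not shown here):

import time

from svc_infra.health import HealthCheckResult, HealthStatus

async def check_queue_depth() -> HealthCheckResult:  # hypothetical check
    start = time.perf_counter()
    depth = 0  # placeholder for a real measurement
    status = HealthStatus.HEALTHY if depth < 1000 else HealthStatus.UNHEALTHY
    return HealthCheckResult(
        name="queue_depth",
        status=status,
        latency_ms=(time.perf_counter() - start) * 1000,
        message=None,              # now typed as str | None
        details={"depth": depth},  # now typed as dict[str, Any] | None
    )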
svc_infra/http/client.py
CHANGED
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 import os
 from contextvars import ContextVar
-from typing import Any
+from typing import Any
 
 import httpx
 
@@ -22,7 +22,7 @@ def get_request_id() -> str | None:
     return _request_id_ctx.get()
 
 
-def _merge_request_id_header(headers:
+def _merge_request_id_header(headers: dict[str, str] | None) -> dict[str, str]:
     """Merge X-Request-Id header into headers dict if request ID is set."""
     result = dict(headers) if headers else {}
     request_id = get_request_id()
@@ -59,9 +59,9 @@ def make_timeout(seconds: float | None = None) -> httpx.Timeout:
 
 def new_httpx_client(
     *,
-    timeout_seconds:
-    headers:
-    base_url:
+    timeout_seconds: float | None = None,
+    headers: dict[str, str] | None = None,
+    base_url: str | None = None,
     propagate_request_id: bool = True,
     **kwargs: Any,
 ) -> httpx.Client:
@@ -71,9 +71,7 @@ def new_httpx_client(
     If propagate_request_id=True (default), X-Request-Id header is added from context.
     """
     timeout = make_timeout(timeout_seconds)
-    merged_headers = (
-        _merge_request_id_header(headers) if propagate_request_id else headers
-    )
+    merged_headers = _merge_request_id_header(headers) if propagate_request_id else headers
     # httpx doesn't accept base_url=None; only pass if non-None
     client_kwargs = {"timeout": timeout, "headers": merged_headers, **kwargs}
     if base_url is not None:
@@ -83,9 +81,9 @@ def new_httpx_client(
 
 def new_async_httpx_client(
     *,
-    timeout_seconds:
-    headers:
-    base_url:
+    timeout_seconds: float | None = None,
+    headers: dict[str, str] | None = None,
+    base_url: str | None = None,
     propagate_request_id: bool = True,
     **kwargs: Any,
 ) -> httpx.AsyncClient:
@@ -95,9 +93,7 @@ def new_async_httpx_client(
     If propagate_request_id=True (default), X-Request-Id header is added from context.
     """
     timeout = make_timeout(timeout_seconds)
-    merged_headers = (
-        _merge_request_id_header(headers) if propagate_request_id else headers
-    )
+    merged_headers = _merge_request_id_header(headers) if propagate_request_id else headers
     # httpx doesn't accept base_url=None; only pass if non-None
     client_kwargs = {"timeout": timeout, "headers": merged_headers, **kwargs}
     if base_url is not None:
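The factory signatures are unchanged apart from the modernized annotations, so existing call sites keep working. A usage sketch for the sync factory; the base URL and endpoint are illustrative values, not part of the package:

from svc_infra.http.client import new_httpx_client

client = new_httpx_client(
    timeout_seconds=10.0,
    headers={"Accept": "application/json"},
    base_url="https://api.example.com",  # assumed example value
    propagate_request_id=True,  # adds X-Request-Id from context when one is set
)
resp = client.get("/health")
client.close()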
svc_infra/jobs/__init__.py
ADDED
@@ -0,0 +1,79 @@
+"""Background jobs module providing queue abstraction and worker utilities.
+
+This module provides a flexible background job system with multiple backends:
+
+- **InMemoryJobQueue**: Simple in-memory queue for tests and local development
+- **RedisJobQueue**: Production-ready Redis-backed queue with visibility timeout
+- **InMemoryScheduler**: Interval-based scheduler for periodic tasks
+
+Example:
+    from svc_infra.jobs import easy_jobs, Job
+
+    # Initialize queue and scheduler (auto-detects Redis or uses memory)
+    queue, scheduler = easy_jobs()
+
+    # Enqueue a job
+    job = queue.enqueue("send_email", {"to": "user@example.com"})
+    print(f"Enqueued job: {job.id}")
+
+    # Process jobs with a worker
+    from svc_infra.jobs import process_one
+
+    async def handler(job: Job):
+        if job.name == "send_email":
+            await send_email(job.payload["to"])
+
+    await process_one(queue, handler)
+
+Environment Variables:
+    JOBS_DRIVER: Backend driver ("memory" or "redis"), defaults to "memory"
+    REDIS_URL: Redis connection URL for redis driver
+    JOB_DEFAULT_TIMEOUT_SECONDS: Per-job execution timeout
+    JOBS_SCHEDULE_JSON: JSON array of scheduled task definitions
+
+See Also:
+    - docs/jobs.md for detailed documentation
+    - svc_infra.jobs.builtins for webhook delivery and outbox processing
+"""
+
+from __future__ import annotations
+
+# Easy setup function
+from .easy import easy_jobs
+
+# Loader for schedule configuration
+from .loader import schedule_from_env
+
+# Core queue abstractions
+from .queue import InMemoryJobQueue, Job, JobQueue
+
+# Redis-backed queue for production
+from .redis_queue import RedisJobQueue
+
+# Runner for long-lived workers
+from .runner import WorkerRunner
+
+# Scheduler for periodic tasks
+from .scheduler import InMemoryScheduler, ScheduledTask
+
+# Worker utilities
+from .worker import process_one
+
+__all__ = [
+    # Core types
+    "Job",
+    "JobQueue",
+    # Queue implementations
+    "InMemoryJobQueue",
+    "RedisJobQueue",
+    # Scheduler
+    "InMemoryScheduler",
+    "ScheduledTask",
+    # Easy setup
+    "easy_jobs",
+    # Worker utilities
+    "process_one",
+    "WorkerRunner",
+    # Configuration loader
+    "schedule_from_env",
+]
svc_infra/jobs/builtins/outbox_processor.py
CHANGED
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from
+from collections.abc import Iterable
 
 from svc_infra.db.outbox import OutboxStore
 from svc_infra.jobs.queue import JobQueue
@@ -10,7 +10,7 @@ def make_outbox_tick(
     outbox: OutboxStore,
     queue: JobQueue,
     *,
-    topics:
+    topics: Iterable[str] | None = None,
     job_name_prefix: str = "outbox",
 ):
     """Return an async task function to move one outbox message into the job queue.
@@ -30,9 +30,7 @@ def make_outbox_tick(
         if msg.id in dispatched:
            return
         job_name = f"{job_name_prefix}.{msg.topic}"
-        queue.enqueue(
-            job_name, {"outbox_id": msg.id, "topic": msg.topic, "payload": msg.payload}
-        )
+        queue.enqueue(job_name, {"outbox_id": msg.id, "topic": msg.topic, "payload": msg.payload})
         # mark as dispatched (bump attempts) so it won't be re-enqueued by fetch_next
         outbox.mark_failed(msg.id)
         dispatched.add(msg.id)
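make_outbox_tick returns an async task that moves at most one due outbox message into the job queue under the name "outbox.<topic>". A sketch under two assumptions: the returned task takes no arguments (matching the zero-argument targets loader.py schedules), and InMemoryJobQueue needs no constructor arguments; constructing the OutboxStore is outside this diff:

from collections.abc import Iterable

from svc_infra.db.outbox import OutboxStore
from svc_infra.jobs import InMemoryJobQueue
from svc_infra.jobs.builtins.outbox_processor import make_outbox_tick

async def drain_once(outbox: OutboxStore, topics: Iterable[str] | None = None) -> None:
    # Hypothetical helper: build a queue, then move one due message into it.
    queue = InMemoryJobQueue()
    tick = make_outbox_tick(outbox, queue, topics=topics, job_name_prefix="outbox")
    await tick()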
svc_infra/jobs/builtins/webhook_delivery.py
CHANGED
@@ -39,9 +39,7 @@ def make_webhook_handler(
             outbox.mark_processed(int(outbox_id))
             return
         event = payload.get("event") if isinstance(payload, dict) else None
-        subscription = (
-            payload.get("subscription") if isinstance(payload, dict) else None
-        )
+        subscription = payload.get("subscription") if isinstance(payload, dict) else None
         if event is not None and subscription is not None:
             delivery_payload = event
             url = subscription.get("url") or get_webhook_url_for_topic(topic)
svc_infra/jobs/loader.py
CHANGED
@@ -5,7 +5,8 @@ import importlib
 import json
 import logging
 import os
-from
+from collections.abc import Awaitable, Callable
+from typing import cast
 
 from .scheduler import InMemoryScheduler
 
@@ -17,7 +18,7 @@ def _resolve_target(path: str) -> Callable[[], Awaitable[None]]:
     mod = importlib.import_module(mod_name)
     fn = getattr(mod, func_name)
     if asyncio.iscoroutinefunction(fn):
-        return cast(Callable[[], Awaitable[None]], fn)
+        return cast("Callable[[], Awaitable[None]]", fn)
 
     # wrap sync into async
     async def _wrapped():
@@ -26,9 +27,7 @@ def _resolve_target(path: str) -> Callable[[], Awaitable[None]]:
     return _wrapped
 
 
-def schedule_from_env(
-    scheduler: InMemoryScheduler, env_var: str = "JOBS_SCHEDULE_JSON"
-) -> None:
+def schedule_from_env(scheduler: InMemoryScheduler, env_var: str = "JOBS_SCHEDULE_JSON") -> None:
     data = os.getenv(env_var)
     if not data:
         return
svc_infra/jobs/queue.py
CHANGED
@@ -4,8 +4,8 @@ import logging
 import os
 import warnings
 from dataclasses import dataclass, field
-from datetime import datetime, timedelta
-from typing import Any,
+from datetime import UTC, datetime, timedelta
+from typing import Any, Protocol
 
 logger = logging.getLogger(__name__)
 
@@ -33,21 +33,19 @@ def _check_inmemory_production_warning(class_name: str) -> None:
 class Job:
     id: str
     name: str
-    payload:
-    available_at: datetime = field(default_factory=lambda: datetime.now(
+    payload: dict[str, Any]
+    available_at: datetime = field(default_factory=lambda: datetime.now(UTC))
     attempts: int = 0
     max_attempts: int = 5
     backoff_seconds: int = 60  # base backoff for retry
-    last_error:
+    last_error: str | None = None
 
 
 class JobQueue(Protocol):
-    def enqueue(
-        self, name: str, payload: Dict[str, Any], *, delay_seconds: int = 0
-    ) -> Job:
+    def enqueue(self, name: str, payload: dict[str, Any], *, delay_seconds: int = 0) -> Job:
         pass
 
-    def reserve_next(self) ->
+    def reserve_next(self) -> Job | None:
         pass
 
     def ack(self, job_id: str) -> None:
@@ -72,24 +70,16 @@ class InMemoryJobQueue:
         self._seq += 1
         return str(self._seq)
 
-    def enqueue(
-
-
-        when = datetime.now(timezone.utc) + timedelta(seconds=delay_seconds)
-        job = Job(
-            id=self._next_id(), name=name, payload=dict(payload), available_at=when
-        )
+    def enqueue(self, name: str, payload: dict[str, Any], *, delay_seconds: int = 0) -> Job:
+        when = datetime.now(UTC) + timedelta(seconds=delay_seconds)
+        job = Job(id=self._next_id(), name=name, payload=dict(payload), available_at=when)
         self._jobs.append(job)
         return job
 
-    def reserve_next(self) ->
-        now = datetime.now(
+    def reserve_next(self) -> Job | None:
+        now = datetime.now(UTC)
         for job in self._jobs:
-            if
-                job.available_at <= now
-                and job.attempts >= 0
-                and job.attempts < job.max_attempts
-            ):
+            if job.available_at <= now and job.attempts >= 0 and job.attempts < job.max_attempts:
                 job.attempts += 1
                 return job
         return None
@@ -98,7 +88,7 @@ class InMemoryJobQueue:
         self._jobs = [j for j in self._jobs if j.id != job_id]
 
     def fail(self, job_id: str, *, error: str | None = None) -> None:
-        now = datetime.now(
+        now = datetime.now(UTC)
         for job in self._jobs:
            if job.id == job_id:
                 job.last_error = error
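The queue now timestamps jobs with datetime.now(UTC) and the JobQueue protocol spells its signatures with built-in generics. A sketch of the enqueue/reserve/ack cycle using only the methods visible in these hunks, assuming InMemoryJobQueue needs no constructor arguments; the job name and handler step are illustrative:

from svc_infra.jobs import InMemoryJobQueue

queue = InMemoryJobQueue()
queue.enqueue("send_email", {"to": "user@example.com"}, delay_seconds=0)

job = queue.reserve_next()  # Job | None; None when nothing is due yet
if job is not None:
    try:
        ...  # hand job.payload to your handler here
        queue.ack(job.id)  # drop the job on success
    except Exception as exc:
        queue.fail(job.id, error=str(exc))  # record the error for a later retry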
svc_infra/jobs/redis_queue.py
CHANGED
@@ -3,8 +3,8 @@ from __future__ import annotations
 import json
 import logging
 from dataclasses import asdict
-from datetime import
-from typing import Any,
+from datetime import UTC, datetime
+from typing import Any, cast
 
 from redis import Redis
 
@@ -41,9 +41,7 @@ class RedisJobQueue(JobQueue):
     - {p}:dlq (LIST) dead-letter job ids
     """
 
-    def __init__(
-        self, client: Redis, *, prefix: str = "jobs", visibility_timeout: int = 60
-    ):
+    def __init__(self, client: Redis, *, prefix: str = "jobs", visibility_timeout: int = 60):
         self._r = client
         self._p = prefix
         self._vt = visibility_timeout
@@ -63,8 +61,8 @@ class RedisJobQueue(JobQueue):
         return f"{self._p}:job:{job_id}"
 
     # Core ops
-    def enqueue(self, name: str, payload:
-        now = datetime.now(
+    def enqueue(self, name: str, payload: dict, *, delay_seconds: int = 0) -> Job:
+        now = datetime.now(UTC)
         job_id = str(self._r.incr(self._k("seq")))
         job = Job(id=job_id, name=name, payload=dict(payload))
         # Persist job
@@ -85,8 +83,8 @@ class RedisJobQueue(JobQueue):
         return job
 
     def _move_due_delayed_to_ready(self) -> None:
-        now_ts = int(datetime.now(
-        ids = cast(list[Any], self._r.zrangebyscore(self._k("delayed"), "-inf", now_ts))
+        now_ts = int(datetime.now(UTC).timestamp())
+        ids = cast("list[Any]", self._r.zrangebyscore(self._k("delayed"), "-inf", now_ts))
         if not ids:
             return
         pipe = self._r.pipeline()
@@ -97,10 +95,8 @@ class RedisJobQueue(JobQueue):
         pipe.execute()
 
     def _requeue_timed_out_processing(self) -> None:
-        now_ts = int(datetime.now(
-        ids = cast(
-            list[Any], self._r.zrangebyscore(self._k("processing_vt"), "-inf", now_ts)
-        )
+        now_ts = int(datetime.now(UTC).timestamp())
+        ids = cast("list[Any]", self._r.zrangebyscore(self._k("processing_vt"), "-inf", now_ts))
         if not ids:
             return
         pipe = self._r.pipeline()
@@ -113,14 +109,14 @@ class RedisJobQueue(JobQueue):
             pipe.hdel(self._job_key(jid_s), "visible_at")
         pipe.execute()
 
-    def reserve_next(self) ->
+    def reserve_next(self) -> Job | None:
         # opportunistically move due delayed jobs
         self._move_due_delayed_to_ready()
         # move timed-out processing jobs back to ready before reserving
         self._requeue_timed_out_processing()
 
         # Calculate visibility timeout BEFORE reserve to prevent race condition
-        visible_at = int(datetime.now(
+        visible_at = int(datetime.now(UTC).timestamp()) + int(self._vt)
 
         # Try atomic reserve using Lua script if available
         # This prevents race condition where two workers could reserve the same job
@@ -139,26 +135,20 @@ class RedisJobQueue(JobQueue):
             logger.warning("Lua script failed, using non-atomic reserve: %s", e)
             jid = self._r.rpoplpush(self._k("ready"), self._k("processing"))
             if jid:
-                job_id_tmp = (
-                    jid.decode()
-                    if isinstance(jid, (bytes, bytearray))
-                    else str(jid)
-                )
+                job_id_tmp = jid.decode() if isinstance(jid, (bytes, bytearray)) else str(jid)
                 self._r.zadd(self._k("processing_vt"), {job_id_tmp: visible_at})
         else:
             # Non-atomic fallback (for fakeredis in tests, or older Redis versions)
             jid = self._r.rpoplpush(self._k("ready"), self._k("processing"))
             if jid:
-                job_id_tmp = (
-                    jid.decode() if isinstance(jid, (bytes, bytearray)) else str(jid)
-                )
+                job_id_tmp = jid.decode() if isinstance(jid, (bytes, bytearray)) else str(jid)
                 self._r.zadd(self._k("processing_vt"), {job_id_tmp: visible_at})
 
         if not jid:
             return None
         job_id = jid.decode() if isinstance(jid, (bytes, bytearray)) else str(jid)
         key = self._job_key(job_id)
-        data = cast(dict[Any, Any], self._r.hgetall(key))
+        data = cast("dict[Any, Any]", self._r.hgetall(key))
         if not data:
             # corrupted entry; ack and skip
             self._r.lrem(self._k("processing"), 1, job_id)
@@ -166,7 +156,7 @@ class RedisJobQueue(JobQueue):
             return None
 
         # Decode fields
-        def _get(field: str, default:
+        def _get(field: str, default: str | None = None) -> str | None:
             val = (
                 data.get(field.encode())
                 if isinstance(next(iter(data.keys())), bytes)
@@ -187,9 +177,7 @@ class RedisJobQueue(JobQueue):
            payload = {}
         available_at_str = _get("available_at")
         available_at = (
-            datetime.fromisoformat(available_at_str)
-            if available_at_str
-            else datetime.now(timezone.utc)
+            datetime.fromisoformat(available_at_str) if available_at_str else datetime.now(UTC)
         )
         # If exceeded max_attempts → DLQ and skip
         if attempts > max_attempts:
@@ -216,13 +204,13 @@ class RedisJobQueue(JobQueue):
 
     def fail(self, job_id: str, *, error: str | None = None) -> None:
         key = self._job_key(job_id)
-        data = cast(dict[Any, Any], self._r.hgetall(key))
+        data = cast("dict[Any, Any]", self._r.hgetall(key))
         if not data:
             # nothing to do
             self._r.lrem(self._k("processing"), 1, job_id)
             return
 
-        def _get(field: str, default:
+        def _get(field: str, default: str | None = None) -> str | None:
             val = (
                 data.get(field.encode())
                 if isinstance(next(iter(data.keys())), bytes)
@@ -235,7 +223,7 @@ class RedisJobQueue(JobQueue):
         attempts = int(_get("attempts", "0") or "0")
         max_attempts = int(_get("max_attempts", "5") or "5")
         backoff_seconds = int(_get("backoff_seconds", "60") or "60")
-        now_ts = int(datetime.now(
+        now_ts = int(datetime.now(UTC).timestamp())
         # DLQ if at or beyond max_attempts
         if attempts >= max_attempts:
             self._r.lrem(self._k("processing"), 1, job_id)
@@ -246,9 +234,7 @@ class RedisJobQueue(JobQueue):
         available_at_ts = now_ts + delay
         mapping: dict[str, str] = {
             "last_error": error or "",
-            "available_at": datetime.fromtimestamp(
-                available_at_ts, tz=timezone.utc
-            ).isoformat(),
+            "available_at": datetime.fromtimestamp(available_at_ts, tz=UTC).isoformat(),
         }
         self._r.hset(key, mapping=mapping)
         self._r.lrem(self._k("processing"), 1, job_id)