qx-cache 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- qx/cache/__init__.py +20 -0
- qx/cache/client/__init__.py +96 -0
- qx/cache/distributed_lock/__init__.py +96 -0
- qx/cache/idempotency/__init__.py +172 -0
- qx/cache/py.typed +0 -0
- qx_cache-0.1.0.dist-info/METADATA +61 -0
- qx_cache-0.1.0.dist-info/RECORD +8 -0
- qx_cache-0.1.0.dist-info/WHEEL +4 -0
qx/cache/__init__.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
"""Qx cache layer."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from qx.cache.client import Cache, CacheSettings, create_client
|
|
6
|
+
from qx.cache.distributed_lock import DistributedLock, LockNotHeldError
|
|
7
|
+
from qx.cache.idempotency import IdempotencyConflictError, IdempotencyStore
|
|
8
|
+
|
|
9
|
+
__version__ = "0.1.0"
|
|
10
|
+
|
|
11
|
+
__all__ = [
|
|
12
|
+
"Cache",
|
|
13
|
+
"CacheSettings",
|
|
14
|
+
"DistributedLock",
|
|
15
|
+
"IdempotencyConflictError",
|
|
16
|
+
"IdempotencyStore",
|
|
17
|
+
"LockNotHeldError",
|
|
18
|
+
"__version__",
|
|
19
|
+
"create_client",
|
|
20
|
+
]
|
|
@@ -0,0 +1,96 @@
|
|
|
1
|
+
"""Async Redis client wrapper.
|
|
2
|
+
|
|
3
|
+
We use ``redis.asyncio`` directly — no custom wrapping of the data API. This
|
|
4
|
+
module supplies:
|
|
5
|
+
|
|
6
|
+
- ``CacheSettings`` (Pydantic settings, ``QX_CACHE__*`` env vars).
|
|
7
|
+
- ``create_client(settings)`` — builds the singleton client with pooling.
|
|
8
|
+
- A trivial ``Cache`` facade with the half-dozen methods we actually use across
|
|
9
|
+
the framework (``get_json`` / ``set_json`` / ``incr`` / ``expire``). Anyone
|
|
10
|
+
needing more reaches for the underlying ``Redis`` instance.
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from __future__ import annotations
|
|
14
|
+
|
|
15
|
+
import json
|
|
16
|
+
from typing import Any, ClassVar
|
|
17
|
+
|
|
18
|
+
import redis.asyncio as aioredis
|
|
19
|
+
from pydantic import Field
|
|
20
|
+
from pydantic_settings import BaseSettings, SettingsConfigDict
|
|
21
|
+
|
|
22
|
+
__all__ = ["Cache", "CacheSettings", "create_client"]
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class CacheSettings(BaseSettings):
    """Redis connection settings, sourced from ``QX_CACHE__*`` environment variables."""

    model_config: ClassVar[SettingsConfigDict] = SettingsConfigDict(
        env_prefix="QX_CACHE__",
        extra="ignore",  # unknown QX_CACHE__* vars are ignored rather than rejected
    )

    # Connection URL; scheme/host/port/db all come from here (redis:// or rediss://).
    url: str = Field(default="redis://localhost:6379/0")
    # Upper bound on pooled connections created by create_client().
    max_connections: int = 20
    decode_responses: bool = False  # we decode ourselves where needed
    # Per-command socket timeout, in seconds.
    socket_timeout_seconds: float = 2.0
    # Timeout for establishing a new connection, in seconds.
    socket_connect_timeout_seconds: float = 2.0
    # Interval (seconds) at which idle pooled connections are health-checked.
    health_check_interval_seconds: int = 30
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def create_client(settings: CacheSettings) -> aioredis.Redis:
    """Build a pooled async Redis client from *settings*.

    The same settings always produce the same pool topology. The caller owns
    the client's lifetime: dispose with ``await client.aclose()`` at shutdown.
    """
    pool_options = {
        "max_connections": settings.max_connections,
        "decode_responses": settings.decode_responses,
        "socket_timeout": settings.socket_timeout_seconds,
        "socket_connect_timeout": settings.socket_connect_timeout_seconds,
        "health_check_interval": settings.health_check_interval_seconds,
    }
    connection_pool = aioredis.ConnectionPool.from_url(settings.url, **pool_options)
    return aioredis.Redis(connection_pool=connection_pool)
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
class Cache:
    """Small facade over Redis covering the framework's own common operations.

    Services take this for the everyday calls, or resolve the raw client
    from DI (``Redis`` is registered alongside ``Cache``) when they need
    anything beyond it.
    """

    def __init__(self, client: aioredis.Redis, *, namespace: str = "qx") -> None:
        self._r = client
        self._ns = namespace

    def key(self, *parts: str) -> str:
        """Join *parts* under the namespace, colon-separated."""
        return ":".join((self._ns, *parts))

    async def get_json(self, key: str) -> Any | None:
        """Fetch and JSON-decode the value at *key*; ``None`` when absent."""
        stored = await self._r.get(key)
        return None if stored is None else json.loads(stored)

    async def set_json(
        self,
        key: str,
        value: Any,
        *,
        ttl_seconds: int | None = None,
    ) -> None:
        """JSON-encode *value* and store it, optionally with an expiry."""
        encoded = json.dumps(value, default=str)
        await self._r.set(key, encoded, ex=ttl_seconds)

    async def delete(self, key: str) -> bool:
        """Remove *key*; ``True`` when something was actually deleted."""
        removed = await self._r.delete(key)
        return bool(removed)

    async def incr(self, key: str, *, by: int = 1) -> int:
        """Atomically increment *key* by *by* and return the new value."""
        return await self._r.incrby(key, by)  # type: ignore[no-any-return]

    async def expire(self, key: str, seconds: int) -> bool:
        """Set a TTL on *key*; truthiness of the server's reply."""
        return await self._r.expire(key, seconds)  # type: ignore[no-any-return]

    async def exists(self, key: str) -> bool:
        """Whether *key* currently exists."""
        return bool(await self._r.exists(key))
|
|
@@ -0,0 +1,96 @@
|
|
|
1
|
+
"""Distributed lock.
|
|
2
|
+
|
|
3
|
+
A simple ``SET NX EX`` lock with a token, for short-duration critical sections
|
|
4
|
+
(leader-election bootstrap, outbox-relay singleton enforcement). NOT a
|
|
5
|
+
replacement for proper coordination — for anything mission-critical, use a
|
|
6
|
+
real consensus protocol (etcd, ZooKeeper) or a transactional advisory lock
|
|
7
|
+
(``pg_advisory_lock``).
|
|
8
|
+
|
|
9
|
+
The lock acquires a UUID token and stores it as the value; release runs a Lua
|
|
10
|
+
script that only deletes the key if the value still matches the token. This
|
|
11
|
+
prevents the classic "lock expired, someone else acquired, then I release
|
|
12
|
+
their lock" footgun.
|
|
13
|
+
|
|
14
|
+
Lock TTL is intentionally short (default 30s). If your critical section is
|
|
15
|
+
longer, you have a different problem; consider whether the work belongs in
|
|
16
|
+
the request path at all.
|
|
17
|
+
"""
|
|
18
|
+
|
|
19
|
+
from __future__ import annotations
|
|
20
|
+
|
|
21
|
+
import contextlib
|
|
22
|
+
from typing import TYPE_CHECKING
|
|
23
|
+
from uuid import uuid4
|
|
24
|
+
|
|
25
|
+
if TYPE_CHECKING:
|
|
26
|
+
from collections.abc import AsyncIterator
|
|
27
|
+
|
|
28
|
+
import redis.asyncio as aioredis
|
|
29
|
+
|
|
30
|
+
__all__ = ["DistributedLock", "LockNotHeldError"]
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
# Only delete the key when the stored value is still our token — prevents
# releasing a lock that expired and was re-acquired by someone else.
_RELEASE_SCRIPT = """
if redis.call('GET', KEYS[1]) == ARGV[1] then
    return redis.call('DEL', KEYS[1])
end
return 0
"""

# Same token check, but extends the TTL instead of deleting.
_RENEW_SCRIPT = """
if redis.call('GET', KEYS[1]) == ARGV[1] then
    return redis.call('EXPIRE', KEYS[1], ARGV[2])
end
return 0
"""


class LockNotHeldError(Exception):
    """Raised on release when the lock was already lost (timed out, stolen)."""


class DistributedLock:
    """A single named lock instance.

    Construct via ``DistributedLock(client, "leader:outbox-relay")`` once;
    reuse to acquire/release.
    """

    def __init__(self, client: aioredis.Redis, name: str) -> None:
        self._r = client
        self._name = name
        # UUID proving ownership; None whenever we do not believe we hold it.
        self._token: str | None = None

    async def acquire(self, *, ttl_seconds: int = 30) -> bool:
        """Try to acquire. Returns True on success, False if already held."""
        token = str(uuid4())
        # SET key token NX EX ttl — atomic. Truthy only if the key was absent.
        ok = await self._r.set(self._name, token, nx=True, ex=ttl_seconds)
        if ok:
            self._token = token
            return True
        return False

    async def renew(self, *, ttl_seconds: int = 30) -> bool:
        """Extend the TTL if we still hold the lock; False once it was lost."""
        if self._token is None:
            return False
        # Lua so the check-and-extend is atomic.
        result = await self._r.eval(  # type: ignore[misc]
            _RENEW_SCRIPT, 1, self._name, self._token, str(ttl_seconds)
        )
        return bool(result)

    async def release(self) -> None:
        """Release the lock — only deletes if the token still matches.

        Raises:
            LockNotHeldError: the key no longer carried our token (TTL expiry
                or takeover), so the critical section may have overlapped with
                another holder. Previously this condition was swallowed even
                though ``LockNotHeldError`` was exported for exactly this case.
        """
        if self._token is None:
            return
        # Clear our token first so a failed release doesn't leave us thinking
        # we still own the lock.
        token, self._token = self._token, None
        deleted = await self._r.eval(_RELEASE_SCRIPT, 1, self._name, token)  # type: ignore[misc]
        if not deleted:
            raise LockNotHeldError(self._name)

    @contextlib.asynccontextmanager
    async def acquired(self, *, ttl_seconds: int = 30) -> AsyncIterator[bool]:
        """Async context manager that yields whether we got the lock."""
        ok = await self.acquire(ttl_seconds=ttl_seconds)
        try:
            yield ok
        finally:
            if ok:
                # Losing the lock to TTL expiry on the way out must not mask
                # an exception raised inside the caller's critical section.
                with contextlib.suppress(LockNotHeldError):
                    await self.release()
|
|
@@ -0,0 +1,172 @@
|
|
|
1
|
+
"""Idempotency store.
|
|
2
|
+
|
|
3
|
+
Implements the ``Idempotency-Key`` header pattern: a client sends a unique key
|
|
4
|
+
with a request; if the same key + payload arrives again within the TTL, the
|
|
5
|
+
server replays the stored response instead of re-executing the command.
|
|
6
|
+
|
|
7
|
+
Storage shape (Redis hash, one per key)::
|
|
8
|
+
|
|
9
|
+
qx:idem:{tenant}:{key}
|
|
10
|
+
status -> "in_progress" | "completed"
|
|
11
|
+
request_fp -> sha256 of the canonical request payload
|
|
12
|
+
response -> JSON of the original response
|
|
13
|
+
created_at -> ISO timestamp
|
|
14
|
+
|
|
15
|
+
The ``request_fp`` lets us distinguish two clients accidentally reusing the
|
|
16
|
+
same key for different payloads — we 409 in that case rather than returning
|
|
17
|
+
the wrong response. That's the difference between "idempotent" and "merely
|
|
18
|
+
de-duplicated".
|
|
19
|
+
"""
|
|
20
|
+
|
|
21
|
+
from __future__ import annotations
|
|
22
|
+
|
|
23
|
+
import hashlib
|
|
24
|
+
import json
|
|
25
|
+
from datetime import UTC, datetime
|
|
26
|
+
from typing import TYPE_CHECKING, Any
|
|
27
|
+
|
|
28
|
+
from qx.core import (
|
|
29
|
+
ConflictError,
|
|
30
|
+
InfrastructureError,
|
|
31
|
+
PreconditionFailedError,
|
|
32
|
+
Result,
|
|
33
|
+
)
|
|
34
|
+
|
|
35
|
+
if TYPE_CHECKING:
|
|
36
|
+
import redis.asyncio as aioredis
|
|
37
|
+
|
|
38
|
+
__all__ = ["IdempotencyConflictError", "IdempotencyStore"]
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
class IdempotencyConflictError(PreconditionFailedError):
    """Same idempotency key, different payload — refuse to serve.

    The client must retry with a fresh key (or fix whatever caused the key
    to be reused across distinct requests); replaying the stored response
    would return the wrong result.
    """
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
class IdempotencyStore:
|
|
46
|
+
"""Store + retrieve idempotent operation outcomes."""
|
|
47
|
+
|
|
48
|
+
def __init__(
|
|
49
|
+
self,
|
|
50
|
+
client: aioredis.Redis,
|
|
51
|
+
*,
|
|
52
|
+
namespace: str = "qx:idem",
|
|
53
|
+
default_ttl_seconds: int = 24 * 3600,
|
|
54
|
+
) -> None:
|
|
55
|
+
self._r = client
|
|
56
|
+
self._ns = namespace
|
|
57
|
+
self._ttl = default_ttl_seconds
|
|
58
|
+
|
|
59
|
+
def _key(self, tenant: str, idem_key: str) -> str:
|
|
60
|
+
return f"{self._ns}:{tenant}:{idem_key}"
|
|
61
|
+
|
|
62
|
+
@staticmethod
|
|
63
|
+
def fingerprint(payload: Any) -> str:
|
|
64
|
+
"""Stable sha256 of a request payload."""
|
|
65
|
+
return hashlib.sha256(
|
|
66
|
+
json.dumps(payload, sort_keys=True, separators=(",", ":"), default=str).encode()
|
|
67
|
+
).hexdigest()
|
|
68
|
+
|
|
69
|
+
async def begin(
|
|
70
|
+
self,
|
|
71
|
+
tenant: str,
|
|
72
|
+
idem_key: str,
|
|
73
|
+
payload: Any,
|
|
74
|
+
*,
|
|
75
|
+
ttl_seconds: int | None = None,
|
|
76
|
+
) -> Result[None | dict[str, Any]]:
|
|
77
|
+
"""Attempt to claim the key.
|
|
78
|
+
|
|
79
|
+
Returns:
|
|
80
|
+
``Result.success(None)`` — caller may proceed (first time we see this key).
|
|
81
|
+
``Result.success(stored_response)`` — replay the stored response.
|
|
82
|
+
``Result.failure(IdempotencyConflictError)`` — same key, different
|
|
83
|
+
payload; client should retry with a fresh key or fix their bug.
|
|
84
|
+
"""
|
|
85
|
+
key = self._key(tenant, idem_key)
|
|
86
|
+
fp = self.fingerprint(payload)
|
|
87
|
+
ttl = ttl_seconds or self._ttl
|
|
88
|
+
|
|
89
|
+
# We try to SETNX-style claim the slot atomically. WATCH+MULTI in Redis
|
|
90
|
+
# is the canonical pattern; here we use a small Lua script for clarity.
|
|
91
|
+
# 1) if no key, write status=in_progress + fp; return "claim"
|
|
92
|
+
# 2) if key exists with same fp: return whatever is stored
|
|
93
|
+
# 3) if key exists with different fp: return "conflict"
|
|
94
|
+
script = """
|
|
95
|
+
if redis.call('EXISTS', KEYS[1]) == 0 then
|
|
96
|
+
redis.call('HSET', KEYS[1], 'status', 'in_progress',
|
|
97
|
+
'request_fp', ARGV[1], 'created_at', ARGV[2])
|
|
98
|
+
redis.call('EXPIRE', KEYS[1], ARGV[3])
|
|
99
|
+
return 'claim'
|
|
100
|
+
end
|
|
101
|
+
local stored_fp = redis.call('HGET', KEYS[1], 'request_fp')
|
|
102
|
+
if stored_fp ~= ARGV[1] then
|
|
103
|
+
return 'conflict'
|
|
104
|
+
end
|
|
105
|
+
local status = redis.call('HGET', KEYS[1], 'status')
|
|
106
|
+
if status == 'completed' then
|
|
107
|
+
return redis.call('HGET', KEYS[1], 'response')
|
|
108
|
+
end
|
|
109
|
+
return 'in_progress'
|
|
110
|
+
"""
|
|
111
|
+
try:
|
|
112
|
+
result = await self._r.eval( # type: ignore[misc]
|
|
113
|
+
script,
|
|
114
|
+
1,
|
|
115
|
+
key,
|
|
116
|
+
fp,
|
|
117
|
+
datetime.now(UTC).isoformat(),
|
|
118
|
+
str(ttl),
|
|
119
|
+
)
|
|
120
|
+
except Exception as exc:
|
|
121
|
+
return Result.failure(
|
|
122
|
+
InfrastructureError(
|
|
123
|
+
code="idempotency.store_unavailable",
|
|
124
|
+
message=f"idempotency store error: {exc}",
|
|
125
|
+
cause=exc,
|
|
126
|
+
)
|
|
127
|
+
)
|
|
128
|
+
|
|
129
|
+
if result in {b"claim", "claim"}:
|
|
130
|
+
return Result.success(None)
|
|
131
|
+
if result in {b"conflict", "conflict"}:
|
|
132
|
+
return Result.failure(
|
|
133
|
+
IdempotencyConflictError(
|
|
134
|
+
code="idempotency.payload_mismatch",
|
|
135
|
+
message=(
|
|
136
|
+
"Idempotency-Key already in use with a different request payload. "
|
|
137
|
+
"Use a new key for a different request."
|
|
138
|
+
),
|
|
139
|
+
)
|
|
140
|
+
)
|
|
141
|
+
if result in {b"in_progress", "in_progress"}:
|
|
142
|
+
return Result.failure(
|
|
143
|
+
ConflictError(
|
|
144
|
+
code="idempotency.in_progress",
|
|
145
|
+
message=(
|
|
146
|
+
"A request with this Idempotency-Key is already in progress. Retry shortly."
|
|
147
|
+
),
|
|
148
|
+
)
|
|
149
|
+
)
|
|
150
|
+
# Otherwise: a stored response payload
|
|
151
|
+
raw = result if isinstance(result, bytes) else result.encode()
|
|
152
|
+
return Result.success(json.loads(raw))
|
|
153
|
+
|
|
154
|
+
async def complete(
|
|
155
|
+
self,
|
|
156
|
+
tenant: str,
|
|
157
|
+
idem_key: str,
|
|
158
|
+
response: Any,
|
|
159
|
+
*,
|
|
160
|
+
ttl_seconds: int | None = None,
|
|
161
|
+
) -> None:
|
|
162
|
+
"""Persist the response and mark the slot as completed."""
|
|
163
|
+
key = self._key(tenant, idem_key)
|
|
164
|
+
ttl = ttl_seconds or self._ttl
|
|
165
|
+
await self._r.hset( # type: ignore[misc]
|
|
166
|
+
key,
|
|
167
|
+
mapping={
|
|
168
|
+
"status": "completed",
|
|
169
|
+
"response": json.dumps(response, default=str),
|
|
170
|
+
},
|
|
171
|
+
)
|
|
172
|
+
await self._r.expire(key, ttl)
|
qx/cache/py.typed
ADDED
|
File without changes
|
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: qx-cache
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Qx cache layer: Redis client, idempotency store, distributed locks
|
|
5
|
+
Author: Qx Engineering
|
|
6
|
+
License: MIT
|
|
7
|
+
Requires-Python: >=3.14
|
|
8
|
+
Requires-Dist: qx-core
|
|
9
|
+
Requires-Dist: qx-di
|
|
10
|
+
Requires-Dist: redis[hiredis]>=5.0.0
|
|
11
|
+
Description-Content-Type: text/markdown
|
|
12
|
+
|
|
13
|
+
# qx-cache
|
|
14
|
+
|
|
15
|
+
Redis client, Lua-atomic idempotency store, and distributed lock for the Qx framework.
|
|
16
|
+
|
|
17
|
+
## What lives here
|
|
18
|
+
|
|
19
|
+
- **`qx.cache.Cache`** — thin async Redis facade with `get_json`/`set_json` (TTL support), `delete`, `incr`, `expire`, `exists`, and a `key()` namespace helper.
|
|
20
|
+
- **`qx.cache.CacheSettings`** — Pydantic settings (`QX_CACHE__*` env vars) for the Redis URL, pool size, and socket timeouts.
|
|
21
|
+
- **`qx.cache.create_client`** — factory that builds a pooled async Redis client from `CacheSettings`; dispose with `await client.aclose()` at shutdown.
|
|
22
|
+
- **`qx.cache.IdempotencyStore`** — Lua-script-based atomic check-and-set. A single round-trip to Redis either claims an idempotency key (first caller wins) or returns the cached result (all subsequent callers). Used by `IdempotencyBehavior` in the Mediator pipeline to make command handlers idempotent.
|
|
23
|
+
- **`qx.cache.DistributedLock`** — Redis-backed advisory lock with TTL and an `acquired()` async context manager. Uses `SET NX EX` for acquisition and Lua for safe release (only the holder can release).
|
|
24
|
+
- **`qx.cache.LockNotHeldError`** — raised when attempting to release a lock that has expired or was never acquired.
|
|
25
|
+
|
|
26
|
+
## Usage
|
|
27
|
+
|
|
28
|
+
### Idempotency store
|
|
29
|
+
|
|
30
|
+
```python
|
|
31
|
+
from qx.cache import IdempotencyStore, create_client
|
|
32
|
+
|
|
33
|
+
redis = create_client(settings.cache)
store = IdempotencyStore(redis, default_ttl_seconds=3600)

# In a command handler or pipeline behavior:
claim = await store.begin(tenant_id, cmd.idempotency_key, cmd.payload)
if not claim.is_success:
    return claim  # payload mismatch, or the same request is still in flight
if claim.value is not None:
    return Result.success(claim.value)  # replay the stored response

result = await do_work(cmd)
if result.is_success:
    await store.complete(tenant_id, cmd.idempotency_key, result.value)
return result
|
|
46
|
+
```
|
|
47
|
+
|
|
48
|
+
### Distributed lock
|
|
49
|
+
|
|
50
|
+
```python
|
|
51
|
+
from qx.cache import DistributedLock
|
|
52
|
+
|
|
53
|
+
lock = DistributedLock(redis, "outbox-relay-leader")
async with lock.acquired(ttl_seconds=30) as is_leader:
    if is_leader:
        await relay.run_once()
|
|
55
|
+
```
|
|
56
|
+
|
|
57
|
+
## Design rules
|
|
58
|
+
|
|
59
|
+
- **Lua atomicity** — `IdempotencyStore` uses a single Lua script for the check-and-set so there is no window between checking and setting, even under concurrent requests.
|
|
60
|
+
- **No silent failures** — `DistributedLock` raises `LockNotHeldError` if the lock has expired before you release it, so callers know their critical section may have overlapped.
|
|
61
|
+
- The cache layer has no dependency on `qx-db` or `qx-cqrs`. It can be used standalone.
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
qx/cache/__init__.py,sha256=TjMVdkN0mFPIq7m2Zbq_zYkzX6qj7I0VC2IDk1zxMw0,486
|
|
2
|
+
qx/cache/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
3
|
+
qx/cache/client/__init__.py,sha256=7PQVCpCd-JPBahH3g9ftctESjG6GiJCD1qkyyyw50yA,3216
|
|
4
|
+
qx/cache/distributed_lock/__init__.py,sha256=Wpm3sq_67oz0VtyS0WAGWIxybU5EUIVS__PphcnfrHo,3205
|
|
5
|
+
qx/cache/idempotency/__init__.py,sha256=gx_Iqw_6wyCQGVZKouKw-ekS3XolontiTmr6UhuFHnE,5843
|
|
6
|
+
qx_cache-0.1.0.dist-info/METADATA,sha256=bXqybQT_Q0jPYAm6mIixxCLvKuZaXOPuqwekoEKRqzs,2469
|
|
7
|
+
qx_cache-0.1.0.dist-info/WHEEL,sha256=QccIxa26bgl1E6uMy58deGWi-0aeIkkangHcxk2kWfw,87
|
|
8
|
+
qx_cache-0.1.0.dist-info/RECORD,,
|