pystackquery-1.0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pystackquery/__init__.py ADDED
@@ -0,0 +1,77 @@
+ """
+ PyStackQuery - Async state management and caching for Python.
+
+ Inspired by TanStack Query, this library provides a robust L1/L2 caching
+ architecture designed for async Python applications (FastAPI, CLI, GUI).
+
+ Core Features:
+ - SWR (Stale-While-Revalidate) caching logic
+ - Background L2 hydration (Redis, SQLite, etc.)
+ - Request deduplication and exponential backoff retries
+ - Reactive state observers with synchronous subscription
+ - Mutations with optimistic update support
+
+ Example:
+     from pystackquery import QueryClient, QueryOptions
+
+     client = QueryClient()
+
+     # Create a reactive observer
+     observer = client.watch(
+         QueryOptions(query_key=("users",), query_fn=fetch_users)
+     )
+
+     # Subscribe is synchronous
+     unsubscribe = observer.subscribe(lambda state: print(state.data))
+ """
+
+ from .cache import QueryCache
+ from .client import QueryClient, QueryClientConfig
+ from .convenience import CachedQuery, dependent_query, parallel_queries, query
+ from .helpers import default_retry_delay, hash_key, partial_match
+ from .mutation import Mutation
+ from .observer import QueryObserver
+ from .options import MutationOptions, QueryOptions
+ from .query import Query
+ from .state import (
+     FetchStatus,
+     MutationState,
+     MutationStatus,
+     QueryState,
+     QueryStatus,
+ )
+ from .types import QueryKey, StorageBackend
+
+ __version__ = "1.0.1"
+
+ __all__ = [
+     # Client
+     "QueryClient",
+     "QueryClientConfig",
+     # Query
+     "Query",
+     "QueryOptions",
+     "QueryObserver",
+     "QueryState",
+     "QueryStatus",
+     "FetchStatus",
+     # Mutation
+     "Mutation",
+     "MutationOptions",
+     "MutationState",
+     "MutationStatus",
+     # Cache
+     "QueryCache",
+     # Types
+     "QueryKey",
+     "StorageBackend",
+     # Helpers
+     "hash_key",
+     "partial_match",
+     "default_retry_delay",
+     # Convenience
+     "parallel_queries",
+     "dependent_query",
+     "query",
+     "CachedQuery",
+ ]
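
Usage note: the SWR flow described in the module docstring (fresh data returned directly; stale data returned immediately while a refetch runs in the background) looks like this in practice. A minimal sketch; `fetch_users` is a hypothetical stand-in, and the `QueryOptions` keyword arguments mirror those used by the `query` decorator in convenience.py below.

    import asyncio

    from pystackquery import QueryClient, QueryOptions

    async def fetch_users() -> list[str]:
        # Hypothetical data source standing in for a real HTTP call
        return ["ada", "grace"]

    async def main() -> None:
        client = QueryClient()
        opts = QueryOptions(
            query_key=("users",),
            query_fn=fetch_users,
            stale_time=60.0,  # data younger than 60s is served without refetching
        )
        users = await client.fetch_query(opts)  # cold: awaits the fetch
        users = await client.fetch_query(opts)  # warm and fresh: served from L1
        print(users)

    asyncio.run(main())
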
pystackquery/cache.py ADDED
@@ -0,0 +1,135 @@
+ """
+ Query cache with LRU eviction.
+ """
+
+ from __future__ import annotations
+
+ import logging
+ from collections import OrderedDict
+ from typing import TYPE_CHECKING, cast
+
+ from .helpers import partial_match
+ from .types import QueryKey, StorageBackend
+
+ if TYPE_CHECKING:
+     from .query import Query
+
+ logger = logging.getLogger("pystackquery")
+
+
+ class QueryCache:
+     """
+     In-memory cache (L1) for Query instances.
+
+     Optionally manages a secondary persistent storage (L2).
+
+     Features:
+     - O(1) lookup and insertion
+     - LRU eviction when max_size is reached
+     - Partial key matching for bulk invalidation
+
+     Attributes:
+         max_size: Maximum number of queries to cache in memory.
+         storage: Optional persistent storage backend.
+     """
+
+     __slots__ = ("_queries", "_max_size", "storage")
+
+     def __init__(
+         self,
+         max_size: int = 1000,
+         storage: StorageBackend | None = None,
+     ) -> None:
+         """
+         Initialize the cache.
+
+         Args:
+             max_size: Maximum number of queries to store in L1.
+             storage: Optional L2 storage backend.
+         """
+         # We store queries as object to avoid Any, but cast when retrieving
+         self._queries: OrderedDict[str, Query[object]] = OrderedDict()
+         self._max_size: int = max_size
+         self.storage: StorageBackend | None = storage
+
+     def get[T](self, key_hash: str) -> Query[T] | None:
+         """
+         Get a query from L1 by its key hash.
+
+         Moves the query to the end (most recently used).
+
+         Args:
+             key_hash: The hash of the query key.
+
+         Returns:
+             The Query if found in L1, None otherwise.
+         """
+         if key_hash in self._queries:
+             self._queries.move_to_end(key_hash)
+             return cast("Query[T]", self._queries[key_hash])
+         return None
+
+     def add[T](self, query: Query[T]) -> None:
+         """
+         Add a query to the cache.
+
+         Evicts the least recently used query if the cache is full.
+
+         Args:
+             query: The Query to add.
+         """
+         if len(self._queries) >= self._max_size:
+             _, evicted = self._queries.popitem(last=False)
+             evicted.destroy()
+             if logger.isEnabledFor(logging.DEBUG):
+                 logger.debug("LRU eviction: %s", evicted.key)
+
+         self._queries[query.key_hash] = cast("Query[object]", query)
+
+         # Wire up the GC removal callback
+         def gc_ready() -> None:
+             self.remove(query.key_hash)
+
+         query._notify_gc_ready = gc_ready
+
+     def remove(self, key_hash: str) -> None:
+         """
+         Remove a query from the cache.
+
+         Args:
+             key_hash: The hash of the query key.
+         """
+         if key_hash in self._queries:
+             query = self._queries.pop(key_hash)
+             query.destroy()
+             if logger.isEnabledFor(logging.DEBUG):
+                 logger.debug("Removed query %s from cache", query.key)
+
+     def find_all(self, filter_key: QueryKey | None = None) -> list[Query[object]]:
+         """
+         Find all queries matching the filter.
+
+         Args:
+             filter_key: If provided, only return queries where filter_key
+                 is a prefix of the query key. If None, return all.
+
+         Returns:
+             List of matching queries.
+         """
+         if filter_key is None:
+             return list(self._queries.values())
+         return [q for q in self._queries.values() if partial_match(filter_key, q.key)]
+
+     def clear(self) -> None:
+         """Remove all queries from the cache."""
+         for query in self._queries.values():
+             query.destroy()
+         self._queries.clear()
+
+     def __len__(self) -> int:
+         """Return the number of cached queries."""
+         return len(self._queries)
+
+     def __contains__(self, key_hash: str) -> bool:
+         """Check if a query is in the cache."""
+         return key_hash in self._queries
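
Usage note: a rough sketch of the LRU eviction above. This assumes `Query(QueryOptions(...))` is constructible with the `storage` argument omitted (client.py below passes it explicitly) and that `key_hash` and `destroy()` behave as `add()` and `remove()` use them.

    from pystackquery import QueryCache, QueryOptions
    from pystackquery.query import Query

    async def noop() -> None:
        return None

    cache = QueryCache(max_size=2)
    a = Query(QueryOptions(query_key=("a",), query_fn=noop))
    b = Query(QueryOptions(query_key=("b",), query_fn=noop))
    c = Query(QueryOptions(query_key=("c",), query_fn=noop))

    cache.add(a)
    cache.add(b)
    cache.get(a.key_hash)  # touch "a" so "b" becomes least recently used
    cache.add(c)           # cache is full: "b" is evicted and destroyed
    assert a.key_hash in cache
    assert b.key_hash not in cache
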
pystackquery/client.py ADDED
@@ -0,0 +1,219 @@
+ """
+ QueryClient - Central orchestrator for state management and caching.
+ """
+
+ from __future__ import annotations
+
+ import asyncio
+ import json
+ import logging
+ import time
+ from dataclasses import dataclass, field
+ from typing import cast
+
+ from .cache import QueryCache
+ from .helpers import default_retry_delay, hash_key
+ from .mutation import Mutation
+ from .observer import QueryObserver
+ from .options import MutationOptions, QueryOptions
+ from .query import Query
+ from .state import FetchStatus, QueryState, QueryStatus
+ from .types import QueryKey, RetryDelayFn, StorageBackend
+
+ logger = logging.getLogger("pystackquery")
+
+
+ @dataclass
+ class QueryClientConfig:
+     """
+     Global defaults for the client instance. Individual queries can override these.
+     """
+     stale_time: float = 0.0
+     gc_time: float = 300.0
+     retry: int = 3
+     retry_delay: RetryDelayFn = field(default_factory=lambda: default_retry_delay)
+     cache_max_size: int = 1000
+     storage: StorageBackend | None = None
+
+
+ class QueryClient:
+     """
+     Main entry point for PyStackQuery.
+     Manages the dual-tier cache (L1 memory, L2 persistent).
+     """
+
+     __slots__ = ("_config", "_cache")
+
+     def __init__(self, config: QueryClientConfig | None = None) -> None:
+         self._config: QueryClientConfig = config or QueryClientConfig()
+         self._cache: QueryCache = QueryCache(
+             max_size=self._config.cache_max_size, storage=self._config.storage
+         )
+
+     @property
+     def cache(self) -> QueryCache:
+         return self._cache
+
+     def _get_or_create_query[T](self, options: QueryOptions[T]) -> Query[T]:
+         """
+         Retrieves a query from L1 or creates it fresh.
+         If L2 storage exists, a background hydration task is kicked off immediately.
+         """
+         key_hash = options.get_key_hash()
+
+         # L1 hit (fastest path)
+         existing: Query[T] | None = self._cache.get(key_hash)
+         if existing is not None:
+             return existing
+
+         # Cache miss - set up a new instance and start hydration
+         query: Query[T] = self._create_query_instance(options)
+         self._cache.add(query)
+
+         if self._cache.storage:
+             try:
+                 loop = asyncio.get_running_loop()
+                 loop.create_task(query.hydrate())
+             except RuntimeError:
+                 pass  # Sync context or loop shutting down
+
+         return query
+
+     def _create_query_instance[T](self, options: QueryOptions[T]) -> Query[T]:
+         """Inherits client-level defaults for query instances."""
+         if options.stale_time == 0.0 and self._config.stale_time != 0.0:
+             options.stale_time = self._config.stale_time
+         if options.gc_time == 300.0 and self._config.gc_time != 300.0:
+             options.gc_time = self._config.gc_time
+         if options.retry == 3 and self._config.retry != 3:
+             options.retry = self._config.retry
+
+         return Query(options, storage=self._cache.storage)
+
+     async def fetch_query[T](self, options: QueryOptions[T]) -> T:
+         """
+         Implementation of the Stale-While-Revalidate (SWR) pattern.
+         Ensures L2 hydration is settled before checking data availability.
+         """
+         query: Query[T] = self._get_or_create_query(options)
+
+         # Ensure L2 hydration settles so we don't double-fetch cold data
+         await asyncio.sleep(0)
+         await query.wait_for_hydration()
+
+         if query.state.data is not None:
+             if not query.is_stale():
+                 return query.state.data
+
+             # Data exists but is stale - return it and trigger a background refresh
+             asyncio.create_task(query.fetch())
+             return query.state.data
+
+         # Cold fetch (miss)
+         return await query.fetch()
+
+     async def prefetch_query[T](self, options: QueryOptions[T]) -> None:
+         """Warms up the cache for a key; the result is discarded and errors are swallowed."""
+         query: Query[T] = self._get_or_create_query(options)
+         await query.wait_for_hydration()
+         if query.state.data is None or query.is_stale():
+             try:
+                 await query.fetch()
+             except Exception:
+                 pass  # Prefetches fail silently
+
+     async def get_query_data_async[T](self, key: QueryKey) -> T | None:
+         """Checks both memory and persistence for data."""
+         key_hash = hash_key(key)
+         existing: Query[object] | None = self._cache.get(key_hash)
+         if existing:
+             return cast(T, existing.state.data)
+
+         if self._cache.storage:
+             try:
+                 serialized = await self._cache.storage.get(key_hash)
+                 if serialized:
+                     state_dict = cast(dict[str, object], json.loads(serialized))
+                     return cast(T, state_dict.get("data"))
+             except Exception:
+                 pass
+         return None
+
+     def get_query_data[T](self, key: QueryKey) -> T | None:
+         """Memory-only data retrieval."""
+         query: Query[object] | None = self._cache.get(hash_key(key))
+         return cast(T, query.state.data) if query else None
+
+     def set_query_data[T](self, key: QueryKey, data: T) -> None:
+         """Manual cache injection. Triggers persistence to L2."""
+         query: Query[object] | None = self._cache.get(hash_key(key))
+         if query:
+             query._dispatch(
+                 status=QueryStatus.SUCCESS,
+                 data=data,
+                 data_updated_at=time.time(),
+             )
+
+     def get_query_state[
+         T, TError: Exception
+     ](self, key: QueryKey) -> QueryState[T, TError] | None:
+         query: Query[object] | None = self._cache.get(hash_key(key))
+         return cast(QueryState[T, TError], query.state) if query else None
+
+     async def invalidate_queries(
+         self,
+         filter_key: QueryKey | None = None,
+         *,
+         refetch: bool = True,
+     ) -> None:
+         """
+         Marks matching queries as stale (data_updated_at=0).
+         Active observers will trigger an immediate refetch.
+         """
+         queries = self._cache.find_all(filter_key)
+         tasks: list[asyncio.Task[object]] = []
+
+         for query in queries:
+             query._dispatch(data_updated_at=0.0)  # Mark stale in L1/L2
+             if refetch and query.observer_count > 0:
+                 tasks.append(asyncio.create_task(query.fetch()))
+
+         if tasks:
+             await asyncio.gather(*tasks, return_exceptions=True)
+
+     def remove_queries(self, filter_key: QueryKey | None = None) -> None:
+         """Hard removal from the L1 cache."""
+         queries = self._cache.find_all(filter_key)
+         for q in queries:
+             self._cache.remove(q.key_hash)
+
+     def reset_queries(self, filter_key: QueryKey | None = None) -> None:
+         """Reverts queries to the IDLE state."""
+         queries = self._cache.find_all(filter_key)
+         for q in queries:
+             q._dispatch(
+                 status=QueryStatus.IDLE,
+                 fetch_status=FetchStatus.IDLE,
+                 data=None,
+                 error=None,
+                 data_updated_at=None,
+                 error_updated_at=None,
+                 fetch_failure_count=0,
+                 fetch_failure_reason=None,
+             )
+
+     def watch[T](self, options: QueryOptions[T]) -> QueryObserver[T]:
+         """Creates a reactive observer for the given options."""
+         return QueryObserver(client=self, options=options)
+
+     def mutation[TInput, TData](
+         self,
+         options: MutationOptions[TInput, TData],
+     ) -> Mutation[TInput, TData]:
+         return Mutation(options=options, client=self)
+
+     def clear(self) -> None:
+         """Wipe the memory cache."""
+         self._cache.clear()
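
Usage note: a short sketch of client-level defaults and prefix invalidation. The fetchers are hypothetical; the positional `QueryOptions(key, fn)` form follows the examples in convenience.py below.

    from pystackquery import QueryClient, QueryClientConfig, QueryOptions

    async def fetch_active() -> list[str]:  # hypothetical fetchers
        return ["write docs"]

    async def fetch_done() -> list[str]:
        return ["ship 1.0.1"]

    client = QueryClient(QueryClientConfig(stale_time=30.0, cache_max_size=500))

    async def refresh() -> None:
        await client.fetch_query(QueryOptions(("todos", "active"), fetch_active))
        await client.fetch_query(QueryOptions(("todos", "done"), fetch_done))
        # Invalidation is prefix-based: ("todos",) marks both entries stale,
        # and queries with active observers are refetched immediately.
        await client.invalidate_queries(("todos",))
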
pystackquery/convenience.py ADDED
@@ -0,0 +1,159 @@
+ """
+ Convenience functions and decorators for common patterns.
+ """
+
+ from __future__ import annotations
+
+ import asyncio
+ from collections.abc import Awaitable, Callable
+ from typing import cast
+
+ from .client import QueryClient
+ from .options import QueryOptions
+ from .types import QueryKey
+
+
+ async def parallel_queries(
+     client: QueryClient,
+     *options_list: QueryOptions[object],
+ ) -> list[object]:
+     """
+     Execute multiple queries in parallel.
+
+     All queries are fetched concurrently, and results are returned
+     in the same order as the options.
+
+     Args:
+         client: The QueryClient to use.
+         *options_list: Query configurations to execute.
+
+     Returns:
+         List of results in the same order as inputs.
+
+     Example:
+         user, posts, stats = await parallel_queries(
+             client,
+             QueryOptions(("user", uid), get_user),
+             QueryOptions(("posts", uid), get_posts),
+             QueryOptions(("stats", uid), get_stats),
+         )
+     """
+     return await asyncio.gather(*(client.fetch_query(opts) for opts in options_list))
+
+
+ async def dependent_query[T](
+     client: QueryClient,
+     depends_on: QueryOptions[T],
+     then: Callable[[T], QueryOptions[object]],
+ ) -> object:
+     """
+     Execute a query that depends on the result of another query.
+
+     First fetches the parent query, then uses its result to construct
+     and fetch the child query.
+
+     Args:
+         client: The QueryClient to use.
+         depends_on: The parent query to fetch first.
+         then: Function that takes parent data and returns child options.
+
+     Returns:
+         The child query result.
+
+     Example:
+         posts = await dependent_query(
+             client,
+             depends_on=QueryOptions(("user", uid), get_user),
+             then=lambda user: QueryOptions(
+                 ("posts", user.id),
+                 lambda: get_posts(user.id)
+             ),
+         )
+     """
+     parent_data = await client.fetch_query(depends_on)
+     child_options = then(parent_data)
+     return await client.fetch_query(child_options)
+
+
+ class CachedQuery[T]:
+     """
+     A cached query wrapper that provides caching utilities.
+
+     Created by the @query decorator.
+     """
+
+     __slots__ = ("_client", "_options")
+
+     def __init__(
+         self,
+         client: QueryClient,
+         options: QueryOptions[T],
+     ) -> None:
+         self._client: QueryClient = client
+         self._options: QueryOptions[T] = options
+
+     async def __call__(self) -> T:
+         """Execute the cached query."""
+         return await self._client.fetch_query(self._options)
+
+     async def invalidate(self) -> None:
+         """Invalidate the cache for this query."""
+         await self._client.invalidate_queries(self._options.query_key)
+
+     def get_data(self) -> T | None:
+         """Get the currently cached data."""
+         return cast(T, self._client.get_query_data(self._options.query_key))
+
+     @property
+     def options(self) -> QueryOptions[T]:
+         """Get the query options."""
+         return self._options
+
+
+ def query[T](
+     client: QueryClient,
+     key: QueryKey,
+     *,
+     stale_time: float = 0.0,
+     gc_time: float = 300.0,
+     retry: int = 3,
+ ) -> Callable[[Callable[[], Awaitable[T]]], CachedQuery[T]]:
+     """
+     Decorator that wraps an async function with caching.
+
+     Provides a simple way to add caching to existing async functions.
+
+     Args:
+         client: The QueryClient to use.
+         key: The query key for caching.
+         stale_time: Seconds before data is stale (default: 0).
+         gc_time: Seconds before cache entry is collected (default: 300).
+         retry: Retry attempts on failure (default: 3).
+
+     Returns:
+         Decorator function.
+
+     Example:
+         @query(client, ("users",), stale_time=60)
+         async def get_users():
+             return await http_get("/api/users")
+
+         # Now calling get_users() goes through the cache
+         users = await get_users()
+
+         # Access utilities
+         await get_users.invalidate()  # Invalidate cache
+         get_users.get_data()          # Get cached data
+     """
+
+     def decorator(fn: Callable[[], Awaitable[T]]) -> CachedQuery[T]:
+         options = QueryOptions(
+             query_key=key,
+             query_fn=fn,
+             stale_time=stale_time,
+             gc_time=gc_time,
+             retry=retry,
+         )
+         return CachedQuery(client, options)
+
+     return decorator
pystackquery/helpers.py ADDED
@@ -0,0 +1,63 @@
+ """
+ Helper utilities for PyStackQuery.
+ """
+
+ from __future__ import annotations
+
+ from .types import QueryKey
+
+
+ def hash_key(key: QueryKey) -> str:
+     """
+     Create a string hash for a query key.
+
+     Uses simple string conversion for speed.
+     Tuples are hashable and their string representation is stable.
+
+     Args:
+         key: The query key tuple.
+
+     Returns:
+         String representation of the key for cache lookup.
+     """
+     return str(key)
+
+
+ def partial_match(filter_key: QueryKey, target_key: QueryKey) -> bool:
+     """
+     Check if filter_key is a prefix of target_key.
+
+     Enables invalidating groups of related queries.
+
+     Args:
+         filter_key: The prefix to match.
+         target_key: The full key to check against.
+
+     Returns:
+         True if filter_key is a prefix of target_key.
+
+     Examples:
+         >>> partial_match(("todos",), ("todos", "active"))
+         True
+         >>> partial_match(("todos",), ("users",))
+         False
+         >>> partial_match(("todos", "active"), ("todos",))
+         False
+     """
+     if len(filter_key) > len(target_key):
+         return False
+     return target_key[: len(filter_key)] == filter_key
+
+
+ def default_retry_delay(attempt: int) -> float:
+     """
+     Default exponential backoff retry delay.
+
+     Args:
+         attempt: Zero-indexed retry attempt number.
+
+     Returns:
+         Delay in seconds, capped at 30 seconds.
+     """
+     delay = 1.0 * (2**attempt)
+     return delay if delay < 30.0 else 30.0
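
Usage note: concretely, the default backoff doubles from one second per attempt and caps at 30 seconds.

    from pystackquery import default_retry_delay

    # attempts 0..5 -> 1s, 2s, 4s, 8s, 16s, then the 30s cap (2**5 = 32 > 30)
    assert [default_retry_delay(n) for n in range(6)] == [1.0, 2.0, 4.0, 8.0, 16.0, 30.0]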