pulse-framework 0.1.42__py3-none-any.whl → 0.1.43__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pulse/__init__.py +12 -3
- pulse/decorators.py +8 -172
- pulse/helpers.py +39 -23
- pulse/queries/client.py +462 -0
- pulse/queries/common.py +28 -0
- pulse/queries/effect.py +39 -0
- pulse/queries/infinite_query.py +1157 -0
- pulse/queries/mutation.py +47 -0
- pulse/queries/query.py +560 -53
- pulse/queries/store.py +81 -18
- pulse/reactive.py +95 -20
- pulse/reactive_extensions.py +19 -7
- pulse/state.py +5 -0
- {pulse_framework-0.1.42.dist-info → pulse_framework-0.1.43.dist-info}/METADATA +1 -1
- {pulse_framework-0.1.42.dist-info → pulse_framework-0.1.43.dist-info}/RECORD +17 -15
- pulse/queries/query_observer.py +0 -365
- {pulse_framework-0.1.42.dist-info → pulse_framework-0.1.43.dist-info}/WHEEL +0 -0
- {pulse_framework-0.1.42.dist-info → pulse_framework-0.1.43.dist-info}/entry_points.txt +0 -0
|
@@ -0,0 +1,1157 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
import datetime as dt
|
|
3
|
+
import inspect
|
|
4
|
+
import time
|
|
5
|
+
from collections import deque
|
|
6
|
+
from collections.abc import Awaitable, Callable
|
|
7
|
+
from dataclasses import dataclass, field
|
|
8
|
+
from typing import (
|
|
9
|
+
Any,
|
|
10
|
+
Generic,
|
|
11
|
+
NamedTuple,
|
|
12
|
+
TypeVar,
|
|
13
|
+
cast,
|
|
14
|
+
overload,
|
|
15
|
+
override,
|
|
16
|
+
)
|
|
17
|
+
|
|
18
|
+
from pulse.context import PulseContext
|
|
19
|
+
from pulse.helpers import (
|
|
20
|
+
MISSING,
|
|
21
|
+
Disposable,
|
|
22
|
+
call_flexible,
|
|
23
|
+
later,
|
|
24
|
+
maybe_await,
|
|
25
|
+
)
|
|
26
|
+
from pulse.queries.common import (
|
|
27
|
+
ActionError,
|
|
28
|
+
ActionResult,
|
|
29
|
+
ActionSuccess,
|
|
30
|
+
OnErrorFn,
|
|
31
|
+
OnSuccessFn,
|
|
32
|
+
QueryKey,
|
|
33
|
+
QueryStatus,
|
|
34
|
+
bind_state,
|
|
35
|
+
)
|
|
36
|
+
from pulse.queries.query import RETRY_DELAY_DEFAULT, QueryConfig
|
|
37
|
+
from pulse.reactive import Computed, Effect, Signal, Untrack
|
|
38
|
+
from pulse.reactive_extensions import ReactiveList, unwrap
|
|
39
|
+
from pulse.state import InitializableProperty, State
|
|
40
|
+
|
|
41
|
+
# Type variables shared by all generic types in this module:
#   T      - the data payload of one fetched page
#   TParam - the cursor/parameter type used to request a page
T = TypeVar("T")
TParam = TypeVar("TParam")
# Bound to State; presumably used by state-bound helpers later in this
# file (not referenced in the portion visible here) — TODO confirm.
TState = TypeVar("TState", bound=State)
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
class Page(NamedTuple, Generic[T, TParam]):
    """One fetched page: the payload plus the param it was fetched with."""

    data: T       # page payload returned by the fetch function
    param: TParam # cursor/parameter used to fetch this page
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
# ─────────────────────────────────────────────────────────────────────────────
|
|
52
|
+
# Action types for the task queue (pure data)
|
|
53
|
+
# ─────────────────────────────────────────────────────────────────────────────
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
@dataclass
class FetchNext(Generic[T, TParam]):
    """Fetch the next page."""

    # Resolved with the fetched page (or None when there is no next
    # param) once the queue processor finishes this action.
    future: "asyncio.Future[ActionResult[Page[T, TParam] | None]]" = field(
        default_factory=asyncio.Future
    )
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
@dataclass
class FetchPrevious(Generic[T, TParam]):
    """Fetch the previous page."""

    # Resolved with the fetched page (or None when there is no previous
    # param) once the queue processor finishes this action.
    future: "asyncio.Future[ActionResult[Page[T, TParam] | None]]" = field(
        default_factory=asyncio.Future
    )
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
@dataclass
class Refetch(Generic[T, TParam]):
    """Refetch all pages."""

    # Optional predicate (page_data, index, all_page_data) -> bool that
    # selects which existing pages are actually re-fetched.
    refetch_page: Callable[[T, int, list[T]], bool] | None = None
    # Resolved with the full refreshed page list.
    future: "asyncio.Future[ActionResult[list[Page[T, TParam]]]]" = field(
        default_factory=asyncio.Future
    )
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
@dataclass
class RefetchPage(Generic[T, TParam]):
    """Refetch a single page by param."""

    param: TParam  # identifies the existing page to refresh
    # Resolved with the new page data, or None if no page has this param.
    future: "asyncio.Future[ActionResult[T | None]]" = field(
        default_factory=asyncio.Future
    )
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
# Union of every action the InfiniteQuery task queue can process.
# All variants carry a `future` resolved when the action completes.
Action = (
    FetchNext[T, TParam]
    | FetchPrevious[T, TParam]
    | Refetch[T, TParam]
    | RefetchPage[T, TParam]
)
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
@dataclass(slots=True)
class InfiniteQueryConfig(QueryConfig[list[Page[T, TParam]]], Generic[T, TParam]):
    """Configuration for InfiniteQuery. Contains all QueryConfig fields plus infinite query specific options."""

    # Param used to fetch the very first page.
    initial_page_param: TParam
    # Derives the next page's param from the pages fetched so far;
    # returning None means there is no next page.
    get_next_page_param: Callable[[list[Page[T, TParam]]], TParam | None]
    # Optional backwards counterpart; None disables previous-page fetching.
    get_previous_page_param: Callable[[list[Page[T, TParam]]], TParam | None] | None
    # Cap on retained pages; 0 (falsy) means unlimited (see _trim_front/_trim_back).
    max_pages: int
|
|
110
|
+
|
|
111
|
+
|
|
112
|
+
class InfiniteQuery(Generic[T, TParam], Disposable):
    """Paginated query that stores data as a list of Page(data, param).

    All mutating operations (fetch next/previous, refetch all, refetch one)
    are funneled through a FIFO action queue processed by a single asyncio
    task, so at most one fetch runs at a time. Reactive signals expose
    status, error, fetch progress and pagination cursors to observers.
    """

    key: QueryKey
    fn: Callable[[TParam], Awaitable[T]]  # fetches one page for a param
    cfg: InfiniteQueryConfig[T, TParam]

    # Reactive state
    pages: ReactiveList[Page[T, TParam]]
    error: Signal[Exception | None]
    last_updated: Signal[float]  # unix timestamp of the last commit
    status: Signal[QueryStatus]
    is_fetching: Signal[bool]
    retries: Signal[int]  # failed attempts for the current action
    retry_reason: Signal[Exception | None]

    has_next_page: Signal[bool]
    has_previous_page: Signal[bool]
    current_action: "Signal[Action[T, TParam] | None]"

    # Task queue
    _queue: deque[Action[T, TParam]]
    _queue_task: asyncio.Task[None] | None

    _observers: "list[InfiniteQueryResult[T, TParam]]"
    _gc_handle: asyncio.TimerHandle | None

    def __init__(
        self,
        key: QueryKey,
        fn: Callable[[TParam], Awaitable[T]],
        *,
        initial_page_param: TParam,
        get_next_page_param: Callable[[list[Page[T, TParam]]], TParam | None],
        get_previous_page_param: (
            Callable[[list[Page[T, TParam]]], TParam | None] | None
        ) = None,
        max_pages: int = 0,
        retries: int = 3,
        retry_delay: float = RETRY_DELAY_DEFAULT,
        initial_data: list[Page[T, TParam]] | None | Any = MISSING,
        initial_data_updated_at: float | dt.datetime | None = None,
        gc_time: float = 300.0,
        on_dispose: Callable[[Any], None] | None = None,
    ):
        self.key = key
        self.fn = fn

        self.cfg = InfiniteQueryConfig(
            retries=retries,
            retry_delay=retry_delay,
            initial_data=initial_data,
            initial_data_updated_at=initial_data_updated_at,
            gc_time=gc_time,
            on_dispose=on_dispose,
            initial_page_param=initial_page_param,
            get_next_page_param=get_next_page_param,
            get_previous_page_param=get_previous_page_param,
            max_pages=max_pages,
        )

        # MISSING means "no initial data supplied" (distinct from None).
        initial_pages: list[Page[T, TParam]]
        if initial_data is MISSING:
            initial_pages = []
        else:
            initial_pages = cast(list[Page[T, TParam]], initial_data) or []

        self.pages = ReactiveList(initial_pages)
        self.error = Signal(None, name=f"inf_query.error({key})")
        self.last_updated = Signal(0.0, name=f"inf_query.last_updated({key})")
        if initial_data_updated_at:
            self.set_updated_at(initial_data_updated_at)

        # With initial pages the query starts out already "success".
        self.status = Signal(
            "loading" if len(initial_pages) == 0 else "success",
            name=f"inf_query.status({key})",
        )
        self.is_fetching = Signal(False, name=f"inf_query.is_fetching({key})")
        self.retries = Signal(0, name=f"inf_query.retries({key})")
        self.retry_reason = Signal(None, name=f"inf_query.retry_reason({key})")

        self.has_next_page = Signal(False, name=f"inf_query.has_next({key})")
        self.has_previous_page = Signal(False, name=f"inf_query.has_prev({key})")
        self.current_action = Signal(None, name=f"inf_query.current_action({key})")

        self._queue = deque()
        self._queue_task = None
        self._observers = []
        self._gc_handle = None

    # ─────────────────────────────────────────────────────────────────────────
    # Commit functions - update state after pages have been modified
    # ─────────────────────────────────────────────────────────────────────────

    async def commit(self):
        """Commit current pages state and run success callbacks."""
        self._commit_sync()

        for obs in self._observers:
            if obs._on_success is not None:  # pyright: ignore[reportPrivateUsage]
                await maybe_await(call_flexible(obs._on_success, self.pages))  # pyright: ignore[reportPrivateUsage]

    async def _commit_error(self, error: Exception):
        """Commit error state and run error callbacks."""
        self._commit_error_sync(error)

        for obs in self._observers:
            if obs._on_error is not None:  # pyright: ignore[reportPrivateUsage]
                await maybe_await(call_flexible(obs._on_error, error))  # pyright: ignore[reportPrivateUsage]

    def _commit_sync(self):
        """Synchronous commit - updates state based on current pages."""
        self._update_has_more()
        self.last_updated.write(time.time())
        self.error.write(None)
        self.status.write("success")
        self.retries.write(0)
        self.retry_reason.write(None)

    def _commit_error_sync(self, error: Exception):
        """Synchronous error commit for set_error (no callbacks)."""
        self.error.write(error)
        self.last_updated.write(time.time())
        self.status.write("error")
        self.is_fetching.write(False)

    def _record_retry(self, reason: Exception):
        """Record a failed retry attempt."""
        self.retries.write(self.retries.read() + 1)
        self.retry_reason.write(reason)

    def _reset_retries(self):
        """Reset retry state at start of operation."""
        self.retries.write(0)
        self.retry_reason.write(None)

    # ─────────────────────────────────────────────────────────────────────────
    # Public API
    # ─────────────────────────────────────────────────────────────────────────

    def set_updated_at(self, updated_at: float | dt.datetime):
        """Record the freshness timestamp (datetime is converted to a unix stamp)."""
        if isinstance(updated_at, dt.datetime):
            updated_at = updated_at.timestamp()
        self.last_updated.write(updated_at)

    def set_initial_data(
        self,
        pages: list[Page[T, TParam]] | Callable[[], list[Page[T, TParam]]],
        updated_at: float | dt.datetime | None = None,
    ):
        """Set initial pages while the query is still loading."""
        # No-op once the query has resolved (success or error).
        if self.status() != "loading":
            return
        value = pages() if callable(pages) else pages
        self.set_data(value, updated_at=updated_at)

    def set_data(
        self,
        pages: list[Page[T, TParam]]
        | Callable[[list[Page[T, TParam]]], list[Page[T, TParam]]],
        updated_at: float | dt.datetime | None = None,
    ):
        """Set pages manually, keeping has_next/prev in sync."""
        new_pages = pages(self.pages) if callable(pages) else pages
        # Materialize before clearing: the updater may return self.pages
        # itself (or a live view of it), in which case clear() would empty
        # the very list we are about to extend with.
        new_pages = list(new_pages)
        self.pages.clear()
        self.pages.extend(new_pages)
        self._trim_back()
        self._commit_sync()
        if updated_at is not None:
            self.set_updated_at(updated_at)

    def set_error(
        self, error: Exception, *, updated_at: float | dt.datetime | None = None
    ):
        """Put the query into the error state without running callbacks."""
        self._commit_error_sync(error)
        if updated_at is not None:
            self.set_updated_at(updated_at)

    async def wait(self) -> ActionResult[list[Page[T, TParam]]]:
        """Wait for initial data or until queue is empty."""
        # If no data and loading, enqueue initial fetch (unless already processing)
        if len(self.pages) == 0 and self.status() == "loading":
            if self._queue_task is None or self._queue_task.done():
                self._enqueue(Refetch())
        # Wait for any in-progress queue processing
        if self._queue_task and not self._queue_task.done():
            await self._queue_task
        # Return result based on current state
        if self.status() == "error":
            return ActionError(cast(Exception, self.error()))
        return ActionSuccess(list(self.pages))

    def observe(self, observer: Any):
        """Register an observer; cancels pending GC and widens gc_time if needed."""
        self._observers.append(observer)
        self.cancel_gc()
        gc_time = getattr(observer, "_gc_time", 0)
        if gc_time and gc_time > 0:
            # Keep the cache alive at least as long as any observer wants.
            self.cfg.gc_time = max(self.cfg.gc_time, gc_time)

    def unobserve(self, observer: Any):
        """Unregister an observer; schedules GC when the last one leaves."""
        if observer in self._observers:
            self._observers.remove(observer)
        if len(self._observers) == 0:
            self.schedule_gc()

    def invalidate(
        self,
        *,
        cancel_fetch: bool = False,
        refetch_page: Callable[[T, int, list[T]], bool] | None = None,
    ):
        """Enqueue a refetch. Synchronous - does not wait for completion."""
        if cancel_fetch:
            self._cancel_queue()
        # Only refetch when someone is actually watching the data.
        if len(self._observers) > 0:
            self._enqueue(Refetch(refetch_page=refetch_page))

    def schedule_gc(self):
        """Arrange disposal after cfg.gc_time seconds (immediately if <= 0)."""
        self.cancel_gc()
        if self.cfg.gc_time > 0:
            self._gc_handle = later(self.cfg.gc_time, self.dispose)
        else:
            self.dispose()

    def cancel_gc(self):
        """Cancel any pending scheduled disposal."""
        if self._gc_handle:
            self._gc_handle.cancel()
            self._gc_handle = None

    # ─────────────────────────────────────────────────────────────────────────
    # Page param computation
    # ─────────────────────────────────────────────────────────────────────────

    def compute_next_param(self) -> TParam | None:
        """Param for the next page (initial param when no pages yet)."""
        if len(self.pages) == 0:
            return self.cfg.initial_page_param
        return self.cfg.get_next_page_param(self.pages)

    def compute_previous_param(self) -> TParam | None:
        """Param for the previous page; None when unsupported or no pages yet."""
        if self.cfg.get_previous_page_param is None:
            return None
        if len(self.pages) == 0:
            return None
        return self.cfg.get_previous_page_param(self.pages)

    def _update_has_more(self):
        """Recompute has_next_page / has_previous_page from current pages."""
        if len(self.pages) == 0:
            self.has_next_page.write(False)
            self.has_previous_page.write(self.cfg.get_previous_page_param is not None)
            return
        next_param = self.cfg.get_next_page_param(self.pages)
        prev_param = None
        if self.cfg.get_previous_page_param:
            prev_param = self.cfg.get_previous_page_param(self.pages)
        self.has_next_page.write(next_param is not None)
        self.has_previous_page.write(prev_param is not None)

    # ─────────────────────────────────────────────────────────────────────────
    # Trimming helpers
    # ─────────────────────────────────────────────────────────────────────────

    def _trim_front(self):
        """Trim pages from front when over max_pages."""
        if self.cfg.max_pages and self.cfg.max_pages > 0:
            while len(self.pages) > self.cfg.max_pages:
                self.pages.pop(0)

    def _trim_back(self):
        """Trim pages from back when over max_pages."""
        if self.cfg.max_pages and self.cfg.max_pages > 0:
            while len(self.pages) > self.cfg.max_pages:
                self.pages.pop()

    # ─────────────────────────────────────────────────────────────────────────
    # Task Queue
    # ─────────────────────────────────────────────────────────────────────────

    def _cancel_queue(self):
        """Cancel all pending and in-flight actions."""
        # Cancel pending actions in the queue
        while self._queue:
            action = self._queue.popleft()
            if not action.future.done():
                action.future.cancel()

        # Cancel the currently executing action and task
        current = self.current_action.read()
        if current is not None and not current.future.done():
            current.future.cancel()

        if self._queue_task and not self._queue_task.done():
            self._queue_task.cancel()
            self._queue_task = None

    def _enqueue(
        self,
        action: "FetchNext[T, TParam] | FetchPrevious[T, TParam] | Refetch[T, TParam] | RefetchPage[T, TParam]",
        *,
        cancel_fetch: bool = False,
    ) -> asyncio.Future[Any]:
        """Enqueue an action and ensure the processor is running."""
        if cancel_fetch:
            self._cancel_queue()

        self._queue.append(action)
        self._ensure_processor()
        return action.future

    def _ensure_processor(self):
        """Ensure the queue processor task is running."""
        if self._queue_task is None or self._queue_task.done():
            # Create task with no reactive scope to avoid inheriting deps from caller
            with Untrack():
                self._queue_task = asyncio.create_task(self._process_queue())
        return self._queue_task

    async def _process_queue(self):
        """Process queued actions sequentially with retry logic."""
        while self._queue:
            action = self._queue.popleft()

            if action.future.cancelled():
                continue

            # Reset state for new action
            self._reset_retries()
            self.is_fetching.write(True)
            self.current_action.write(action)

            try:
                # Inner loop implements cfg.retries attempts with cfg.retry_delay
                # between them; success or exhaustion exits the loop.
                while True:
                    try:
                        result = await self._execute_action(action)
                        if not action.future.done():
                            action.future.set_result(ActionSuccess(result))
                        break
                    except asyncio.CancelledError:
                        raise
                    except Exception as e:
                        if self.retries.read() < self.cfg.retries:
                            self._record_retry(e)
                            await asyncio.sleep(self.cfg.retry_delay)
                            continue
                        raise
            except asyncio.CancelledError:
                if not action.future.done():
                    action.future.cancel()
                raise
            except Exception as e:
                # Retries exhausted: surface the error to state and waiters.
                self.retry_reason.write(e)
                await self._commit_error(e)
                if not action.future.done():
                    action.future.set_result(ActionError(e))
            finally:
                # Only reset state if we're still the current action
                # (not replaced by another action via cancel_fetch)
                if self.current_action.read() is action:
                    self.is_fetching.write(False)
                    self.current_action.write(None)

    async def _execute_action(
        self,
        action: "FetchNext[T, TParam] | FetchPrevious[T, TParam] | Refetch[T, TParam] | RefetchPage[T, TParam]",
    ) -> Any:
        """Execute a single action."""
        if isinstance(action, FetchNext):
            return await self._execute_fetch_next(action)
        elif isinstance(action, FetchPrevious):
            return await self._execute_fetch_previous(action)
        elif isinstance(action, Refetch):
            return await self._execute_refetch_all(action)
        elif isinstance(action, RefetchPage):
            return await self._execute_refetch_one(action)
        else:
            raise TypeError(f"Unknown action type: {type(action)}")

    async def _execute_fetch_next(
        self, action: "FetchNext[T, TParam]"
    ) -> Page[T, TParam] | None:
        """Fetch and append the next page; None when there is no next param."""
        next_param = self.compute_next_param()
        if next_param is None:
            self.has_next_page.write(False)
            return None

        page = await self.fn(next_param)
        page = Page(page, next_param)
        self.pages.append(page)
        self._trim_front()
        await self.commit()
        return page

    async def _execute_fetch_previous(
        self, action: "FetchPrevious[T, TParam]"
    ) -> Page[T, TParam] | None:
        """Fetch and prepend the previous page; None when there is no prev param."""
        prev_param = self.compute_previous_param()
        if prev_param is None:
            self.has_previous_page.write(False)
            return None

        data = await self.fn(prev_param)
        page = Page(data, prev_param)
        self.pages.insert(0, page)
        self._trim_back()
        await self.commit()
        return page

    async def _execute_refetch_all(
        self, action: "Refetch[T, TParam]"
    ) -> list[Page[T, TParam]]:
        """Refetch every page in order, re-deriving each param from the prior pages."""
        if len(self.pages) == 0:
            page = await self.fn(self.cfg.initial_page_param)
            self.pages.append(Page(page, self.cfg.initial_page_param))
            await self.commit()
            return self.pages

        page_param: TParam = self.pages[0].param
        num_existing = len(self.pages)

        for idx in range(num_existing):
            old_page = self.pages[idx]
            should_refetch = True
            if action.refetch_page is not None:
                should_refetch = bool(
                    action.refetch_page(
                        old_page.data, idx, [p.data for p in self.pages]
                    )
                )

            if should_refetch:
                page = await self.fn(page_param)
            else:
                page = old_page.data
            self.pages[idx] = Page(page, page_param)

            next_param = self.cfg.get_next_page_param(self.pages[: idx + 1])
            if next_param is None:
                # Trim remaining pages if we ended early
                while len(self.pages) > idx + 1:
                    self.pages.pop()
                break
            page_param = next_param

        await self.commit()
        return self.pages

    async def _execute_refetch_one(self, action: "RefetchPage[T, TParam]") -> T | None:
        """Refetch the page whose param matches; None when no such page exists."""
        idx = next(
            (i for i, p in enumerate(self.pages) if p.param == action.param),
            None,
        )
        if idx is None:
            return None

        page = await self.fn(action.param)
        self.pages[idx] = Page(page, action.param)
        await self.commit()
        return page

    # ─────────────────────────────────────────────────────────────────────────
    # Public fetch API
    # ─────────────────────────────────────────────────────────────────────────

    async def fetch_next_page(
        self,
        *,
        cancel_fetch: bool = False,
    ) -> ActionResult[Page[T, TParam] | None]:
        """Fetch the next page. Queued for sequential execution."""
        action: FetchNext[T, TParam] = FetchNext()
        return await self._enqueue(action, cancel_fetch=cancel_fetch)

    async def fetch_previous_page(
        self,
        *,
        cancel_fetch: bool = False,
    ) -> ActionResult[Page[T, TParam] | None]:
        """Fetch the previous page. Queued for sequential execution."""
        action: FetchPrevious[T, TParam] = FetchPrevious()
        return await self._enqueue(action, cancel_fetch=cancel_fetch)

    async def refetch(
        self,
        *,
        cancel_fetch: bool = False,
        refetch_page: Callable[[T, int, list[T]], bool] | None = None,
    ) -> ActionResult[list[Page[T, TParam]]]:
        """Refetch all pages. Queued for sequential execution."""
        action: Refetch[T, TParam] = Refetch(refetch_page=refetch_page)
        return await self._enqueue(action, cancel_fetch=cancel_fetch)

    async def refetch_page(
        self,
        param: TParam,
        *,
        cancel_fetch: bool = False,
    ) -> ActionResult[T | None]:
        """Refetch an existing page by its param. Queued for sequential execution."""
        action: RefetchPage[T, TParam] = RefetchPage(param=param)
        return await self._enqueue(action, cancel_fetch=cancel_fetch)

    @override
    def dispose(self):
        """Cancel all work and notify cfg.on_dispose (if set)."""
        self._cancel_queue()
        if self._queue_task and not self._queue_task.done():
            self._queue_task.cancel()
        if self.cfg.on_dispose:
            self.cfg.on_dispose(self)
|
|
619
|
+
|
|
620
|
+
|
|
621
|
+
def none_if_missing(value: Any):
    """Map the MISSING sentinel to None; pass every other value through."""
    if value is MISSING:
        return None
    return value
|
|
623
|
+
|
|
624
|
+
|
|
625
|
+
class InfiniteQueryResult(Generic[T, TParam], Disposable):
|
|
626
|
+
"""
|
|
627
|
+
Observer wrapper for InfiniteQuery with lifecycle and stale tracking.
|
|
628
|
+
"""
|
|
629
|
+
|
|
630
|
+
_query: Computed[InfiniteQuery[T, TParam]]
|
|
631
|
+
_stale_time: float
|
|
632
|
+
_gc_time: float
|
|
633
|
+
_refetch_interval: float | None
|
|
634
|
+
_keep_previous_data: bool
|
|
635
|
+
_on_success: Callable[[list[Page[T, TParam]]], Awaitable[None] | None] | None
|
|
636
|
+
_on_error: Callable[[Exception], Awaitable[None] | None] | None
|
|
637
|
+
_observe_effect: Effect
|
|
638
|
+
_interval_effect: Effect | None
|
|
639
|
+
_data_computed: Computed[list[Page[T, TParam]] | None]
|
|
640
|
+
_enabled: Signal[bool]
|
|
641
|
+
_fetch_on_mount: bool
|
|
642
|
+
|
|
643
|
+
    def __init__(
        self,
        query: Computed[InfiniteQuery[T, TParam]],
        stale_time: float = 0.0,
        gc_time: float = 300.0,
        refetch_interval: float | None = None,
        keep_previous_data: bool = False,
        on_success: Callable[[list[Page[T, TParam]]], Awaitable[None] | None]
        | None = None,
        on_error: Callable[[Exception], Awaitable[None] | None] | None = None,
        enabled: bool = True,
        fetch_on_mount: bool = True,
    ):
        """Attach an observer to the InfiniteQuery produced by *query*.

        The observe effect re-runs when the query computed or the enabled
        signal changes, re-registering this observer and optionally
        triggering a refetch of stale data.
        """
        self._query = query
        self._stale_time = stale_time
        self._gc_time = gc_time
        self._refetch_interval = refetch_interval
        self._keep_previous_data = keep_previous_data
        self._on_success = on_success
        self._on_error = on_error
        self._enabled = Signal(enabled, name=f"inf_query.enabled({query().key})")
        self._fetch_on_mount = fetch_on_mount
        self._interval_effect = None

        def observe_effect():
            # Track the query computed and the enabled signal only.
            q = self._query()
            enabled = self._enabled()
            with Untrack():
                q.observe(self)

                # NOTE(review): indentation reconstructed — the stale check is
                # assumed to sit inside Untrack() so is_stale()'s signal reads
                # do not become dependencies of this effect; confirm upstream.
                if enabled and fetch_on_mount and self.is_stale():
                    q.invalidate()

            def cleanup():
                # Runs before re-execution / on disposal: detach from the
                # (possibly previous) query instance.
                q.unobserve(self)

            return cleanup

        self._observe_effect = Effect(
            observe_effect,
            name=f"inf_query_observe({self._query().key})",
            immediate=True,
        )
        self._data_computed = Computed(
            self._data_computed_fn, name=f"inf_query_data({self._query().key})"
        )

        # Set up interval effect if interval is specified
        if refetch_interval is not None and refetch_interval > 0:
            self._setup_interval_effect(refetch_interval)
|
|
693
|
+
|
|
694
|
+
    def _setup_interval_effect(self, interval: float):
        """Create an effect that invalidates the query at the specified interval."""

        def interval_fn():
            # Read enabled to make this effect reactive to enabled changes
            if self._enabled():
                self._query().invalidate()

        self._interval_effect = Effect(
            interval_fn,
            name=f"inf_query_interval({self._query().key})",
            interval=interval,
            immediate=True,
        )
|
|
708
|
+
|
|
709
|
+
    @property
    def status(self) -> QueryStatus:
        """Reactive status of the underlying query ("loading"/"success"/"error" observed in this module)."""
        return self._query().status()
|
|
712
|
+
|
|
713
|
+
    @property
    def is_loading(self) -> bool:
        """True while status is "loading"."""
        return self.status == "loading"
|
|
716
|
+
|
|
717
|
+
    @property
    def is_success(self) -> bool:
        """True when status is "success"."""
        return self.status == "success"
|
|
720
|
+
|
|
721
|
+
    @property
    def is_error(self) -> bool:
        """True when status is "error"."""
        return self.status == "error"
|
|
724
|
+
|
|
725
|
+
    @property
    def is_fetching(self) -> bool:
        """True while the underlying query is executing any queued action."""
        return self._query().is_fetching()
|
|
728
|
+
|
|
729
|
+
    @property
    def error(self) -> Exception | None:
        """Last committed error of the underlying query, if any."""
        return self._query().error.read()
|
|
732
|
+
|
|
733
|
+
    def _data_computed_fn(
        self, prev: list[Page[T, TParam]] | None
    ) -> list[Page[T, TParam]] | None:
        """Computed body: current pages, or the previous value while refetching.

        When keep_previous_data is set and the query is not in "success",
        the previously computed list is returned so UIs don't flash empty.
        """
        query = self._query()
        if self._keep_previous_data and query.status() != "success":
            return prev
        # Access pages.version to subscribe to structural changes
        result = unwrap(query.pages) if len(query.pages) > 0 else None
        return result
|
|
742
|
+
|
|
743
|
+
    @property
    def data(self) -> list[Page[T, TParam]] | None:
        """Pages as (data, param) tuples; None when the query has no pages yet."""
        return self._data_computed()
|
|
746
|
+
|
|
747
|
+
    @property
    def pages(self) -> list[T] | None:
        """Only the page payloads, in order; None when there is no data."""
        d = self.data
        return [p.data for p in d] if d else None
|
|
751
|
+
|
|
752
|
+
    @property
    def page_params(self) -> list[TParam] | None:
        """Only the page params, in order; None when there is no data."""
        d = self.data
        return [p.param for p in d] if d else None
|
|
756
|
+
|
|
757
|
+
    @property
    def has_next_page(self) -> bool:
        """True when get_next_page_param last yielded a non-None param."""
        return self._query().has_next_page()
|
|
760
|
+
|
|
761
|
+
    @property
    def has_previous_page(self) -> bool:
        """True when get_previous_page_param last yielded a non-None param."""
        return self._query().has_previous_page()
|
|
764
|
+
|
|
765
|
+
    @property
    def is_fetching_next_page(self) -> bool:
        """True while the currently executing action is a FetchNext."""
        return isinstance(self._query().current_action(), FetchNext)
|
|
768
|
+
|
|
769
|
+
    @property
    def is_fetching_previous_page(self) -> bool:
        """True while the currently executing action is a FetchPrevious."""
        return isinstance(self._query().current_action(), FetchPrevious)
|
|
772
|
+
|
|
773
|
+
    def is_stale(self) -> bool:
        """True when the last commit is older than stale_time seconds.

        NOTE(review): stale_time <= 0 returns False (never stale), which
        inverts react-query's staleTime=0 "always stale" convention —
        confirm this is the intended framework semantics.
        """
        if self._stale_time <= 0:
            return False
        query = self._query()
        return (time.time() - query.last_updated.read()) > self._stale_time
|
|
778
|
+
|
|
779
|
+
    async def fetch_next_page(
        self,
        *,
        cancel_fetch: bool = False,
    ) -> ActionResult[Page[T, TParam] | None]:
        """Delegate to the underlying query's fetch_next_page."""
        return await self._query().fetch_next_page(cancel_fetch=cancel_fetch)
|
|
785
|
+
|
|
786
|
+
async def fetch_previous_page(
    self,
    *,
    cancel_fetch: bool = False,
) -> ActionResult[Page[T, TParam] | None]:
    """Fetch the page before the first one via the underlying query."""
    query = self._query()
    return await query.fetch_previous_page(cancel_fetch=cancel_fetch)
|
|
792
|
+
|
|
793
|
+
async def fetch_page(
    self,
    page_param: TParam,
    *,
    cancel_fetch: bool = False,
) -> ActionResult[T | None]:
    """Refetch the single page identified by ``page_param``."""
    query = self._query()
    return await query.refetch_page(page_param, cancel_fetch=cancel_fetch)
|
|
800
|
+
|
|
801
|
+
def set_initial_data(
    self,
    pages: list[Page[T, TParam]] | Callable[[], list[Page[T, TParam]]],
    updated_at: float | dt.datetime | None = None,
):
    """Seed the underlying query with initial pages (value or factory)."""
    query = self._query()
    return query.set_initial_data(pages, updated_at=updated_at)
|
|
807
|
+
|
|
808
|
+
def set_data(
    self,
    pages: list[Page[T, TParam]]
    | Callable[[list[Page[T, TParam]] | None], list[Page[T, TParam]]],
    updated_at: float | dt.datetime | None = None,
):
    """Overwrite the underlying query's pages (value or updater callable)."""
    query = self._query()
    return query.set_data(pages, updated_at=updated_at)
|
|
815
|
+
|
|
816
|
+
async def refetch(
    self,
    *,
    cancel_fetch: bool = False,
    refetch_page: Callable[[T, int, list[T]], bool] | None = None,
) -> ActionResult[list[Page[T, TParam]]]:
    """Refetch the cached pages; ``refetch_page`` may select a subset."""
    query = self._query()
    return await query.refetch(cancel_fetch=cancel_fetch, refetch_page=refetch_page)
|
|
825
|
+
|
|
826
|
+
async def wait(self) -> ActionResult[list[Page[T, TParam]]]:
    """Delegate to the underlying query's wait() and return its result."""
    return await self._query().wait()
|
|
828
|
+
|
|
829
|
+
def invalidate(self):
    """Mark the underlying query's cached data as invalid."""
    self._query().invalidate()
|
|
832
|
+
|
|
833
|
+
def enable(self):
    """Switch the query on by writing True into the reactive enabled flag."""
    self._enabled.write(True)
|
|
835
|
+
|
|
836
|
+
def disable(self):
    """Switch the query off by writing False into the reactive enabled flag."""
    self._enabled.write(False)
|
|
838
|
+
|
|
839
|
+
def set_error(self, error: Exception):
    """Force the underlying query into an error state with ``error``."""
    self._query().set_error(error)
|
|
842
|
+
|
|
843
|
+
@override
def dispose(self):
    """Tear down the effects owned by this result.

    The interval effect only exists when a refetch interval was configured,
    so it is disposed conditionally; the observe effect always exists.
    """
    interval = self._interval_effect
    if interval is not None:
        interval.dispose()
    self._observe_effect.dispose()
|
|
848
|
+
|
|
849
|
+
|
|
850
|
+
class InfiniteQueryProperty(Generic[T, TParam, TState], InitializableProperty):
    """Descriptor that attaches an infinite (paginated) query to a state class.

    Created by the ``@infinite_query`` decorator. On first attribute access it
    binds the fetch/pagination callables to the state instance, resolves the
    query through the render session's query store, builds an
    ``InfiniteQueryResult`` and caches it on the instance under
    ``_priv_result``.
    """

    name: str  # attribute name on the state class (the decorated method's name)
    _fetch_fn: "Callable[[TState, TParam], Awaitable[T]]"  # unbound (self, page_param) fetcher
    # NOTE(review): _keep_alive is declared but never assigned in __init__ —
    # possibly vestigial; confirm before relying on it.
    _keep_alive: bool
    _keep_previous_data: bool
    _stale_time: float
    _gc_time: float
    _refetch_interval: float | None
    _retries: int
    _retry_delay: float
    _initial_page_param: TParam
    # Pagination callbacks are attached later via the decorator methods below.
    _get_next_page_param: (
        Callable[[TState, list[Page[T, TParam]]], TParam | None] | None
    )
    _get_previous_page_param: (
        Callable[[TState, list[Page[T, TParam]]], TParam | None] | None
    )
    _max_pages: int
    _key: QueryKey | Callable[[TState], QueryKey] | None
    # Not using OnSuccessFn and OnErrorFn since unions of callables are not well
    # supported in the type system. We just need to be careful to use
    # call_flexible to invoke these functions.
    _on_success_fn: Callable[[TState, list[T]], Any] | None
    _on_error_fn: Callable[[TState, Exception], Any] | None
    _initial_data_updated_at: float | dt.datetime | None
    _enabled: bool
    _fetch_on_mount: bool
    _priv_result: str  # per-instance attribute name used to cache the built result

    def __init__(
        self,
        name: str,
        fetch_fn: "Callable[[TState, TParam], Awaitable[T]]",
        *,
        initial_page_param: TParam,
        max_pages: int,
        stale_time: float,
        gc_time: float,
        refetch_interval: float | None = None,
        keep_previous_data: bool,
        retries: int,
        retry_delay: float,
        initial_data_updated_at: float | dt.datetime | None = None,
        enabled: bool = True,
        fetch_on_mount: bool = True,
        key: QueryKey | Callable[[TState], QueryKey] | None = None,
    ):
        """Store the query configuration; no query is created until first access."""
        self.name = name
        self._fetch_fn = fetch_fn
        self._initial_page_param = initial_page_param
        # Pagination callbacks start unset; they are provided via the
        # get_next_page_param / get_previous_page_param decorators.
        self._get_next_page_param = None
        self._get_previous_page_param = None
        self._max_pages = max_pages
        self._keep_previous_data = keep_previous_data
        self._stale_time = stale_time
        self._gc_time = gc_time
        self._refetch_interval = refetch_interval
        self._retries = retries
        self._retry_delay = retry_delay
        self._on_success_fn = None
        self._on_error_fn = None
        self._key = key
        self._initial_data_updated_at = initial_data_updated_at
        self._enabled = enabled
        self._fetch_on_mount = fetch_on_mount
        self._priv_result = f"__inf_query_{name}"

    def key(self, fn: Callable[[TState], QueryKey]):
        """Decorator: register a per-instance key function (exclusive with key=...)."""
        if self._key is not None:
            raise RuntimeError(
                f"Cannot use @{self.name}.key decorator when a key is already provided to @infinite_query(key=...)."
            )
        self._key = fn
        return fn

    def on_success(self, fn: OnSuccessFn[TState, list[T]]):
        """Decorator: register the single allowed success callback."""
        if self._on_success_fn is not None:
            raise RuntimeError(
                f"Duplicate on_success() decorator for infinite query '{self.name}'. Only one is allowed."
            )
        self._on_success_fn = fn  # pyright: ignore[reportAttributeAccessIssue]
        return fn

    def on_error(self, fn: OnErrorFn[TState]):
        """Decorator: register the single allowed error callback."""
        if self._on_error_fn is not None:
            raise RuntimeError(
                f"Duplicate on_error() decorator for infinite query '{self.name}'. Only one is allowed."
            )
        self._on_error_fn = fn  # pyright: ignore[reportAttributeAccessIssue]
        return fn

    def get_next_page_param(
        self,
        fn: Callable[[TState, list[Page[T, TParam]]], TParam | None],
    ) -> Callable[[TState, list[Page[T, TParam]]], TParam | None]:
        """Decorator: register the (required) next-page-param function."""
        if self._get_next_page_param is not None:
            raise RuntimeError(
                f"Duplicate get_next_page_param() decorator for infinite query '{self.name}'. Only one is allowed."
            )
        self._get_next_page_param = fn
        return fn

    def get_previous_page_param(
        self,
        fn: Callable[[TState, list[Page[T, TParam]]], TParam | None],
    ) -> Callable[[TState, list[Page[T, TParam]]], TParam | None]:
        """Decorator: register the (optional) previous-page-param function."""
        if self._get_previous_page_param is not None:
            raise RuntimeError(
                f"Duplicate get_previous_page_param() decorator for infinite query '{self.name}'. Only one is allowed."
            )
        self._get_previous_page_param = fn
        return fn

    @override
    def initialize(self, state: Any, name: str) -> InfiniteQueryResult[T, TParam]:
        """Build (or return the cached) InfiniteQueryResult for ``state``.

        Validates that a next-page-param function and a key were configured,
        binds all user callables to the state instance, and caches the result
        on the instance so subsequent accesses are cheap.
        The ``name`` parameter is unused here; the descriptor already knows
        its own name.
        """
        # Fast path: reuse the result cached on this state instance.
        result: InfiniteQueryResult[T, TParam] | None = getattr(
            state, self._priv_result, None
        )
        if result:
            return result

        if self._get_next_page_param is None:
            raise RuntimeError(
                f"get_next_page_param must be set via @{self.name}.get_next_page_param decorator"
            )

        fetch_fn = bind_state(state, self._fetch_fn)

        next_fn = bind_state(state, self._get_next_page_param)
        prev_fn = (
            bind_state(state, self._get_previous_page_param)
            if self._get_previous_page_param
            else None
        )

        if self._key is None:
            raise RuntimeError(
                f"key is required for infinite query '{self.name}'. Provide a key via @infinite_query(key=...) or @{self.name}.key decorator."
            )
        query = self._resolve_keyed(
            state, fetch_fn, next_fn, prev_fn, self._initial_data_updated_at
        )

        # Wrap the user success callback so it receives the bare page payloads
        # and supports both sync and async callables (via maybe_await).
        on_success = None
        if self._on_success_fn:
            bound_fn = bind_state(state, self._on_success_fn)

            async def on_success_wrapper(data: list[Page[T, TParam]]):
                await maybe_await(call_flexible(bound_fn, [p.data for p in data]))

            on_success = on_success_wrapper

        result = InfiniteQueryResult(
            query=query,
            stale_time=self._stale_time,
            keep_previous_data=self._keep_previous_data,
            gc_time=self._gc_time,
            refetch_interval=self._refetch_interval,
            on_success=on_success,
            on_error=bind_state(state, self._on_error_fn)
            if self._on_error_fn
            else None,
            enabled=self._enabled,
            fetch_on_mount=self._fetch_on_mount,
        )

        setattr(state, self._priv_result, result)
        return result

    def _resolve_keyed(
        self,
        state: TState,
        fetch_fn: Callable[[TParam], Awaitable[T]],
        next_fn: Callable[[list[Page[T, TParam]]], TParam | None],
        prev_fn: Callable[[list[Page[T, TParam]]], TParam | None] | None,
        initial_data_updated_at: float | dt.datetime | None,
    ) -> Computed[InfiniteQuery[T, TParam]]:
        """Return a Computed that resolves the key to an InfiniteQuery.

        Re-evaluating the outer Computed re-runs the key and re-resolves the
        query through the session's query store, so a reactive key function
        can switch the result to a different cached query.
        """
        assert self._key is not None

        # Create a Computed for the key - passthrough for constant keys, reactive for function keys
        if callable(self._key):
            key_computed = Computed(
                bind_state(state, self._key), name=f"inf_query.key.{self.name}"
            )
        else:
            constant_key = self._key  # ensure a constant reference
            key_computed = Computed(
                lambda: constant_key, name=f"inf_query.key.{self.name}"
            )

        render = PulseContext.get().render
        if render is None:
            raise RuntimeError("No render session available")
        store = render.query_store

        def query() -> InfiniteQuery[T, TParam]:
            # Reads the key computed, then asks the store for the matching
            # infinite query (creating it with this configuration if absent).
            key = key_computed()
            return cast(
                InfiniteQuery[T, TParam],
                store.ensure_infinite(
                    key,
                    fetch_fn,
                    initial_page_param=self._initial_page_param,
                    get_next_page_param=next_fn,
                    get_previous_page_param=prev_fn,
                    max_pages=self._max_pages,
                    gc_time=self._gc_time,
                    retries=self._retries,
                    retry_delay=self._retry_delay,
                    initial_data_updated_at=initial_data_updated_at,
                ),
            )

        return Computed(query, name=f"inf_query.{self.name}")

    def __get__(self, obj: Any, objtype: Any = None) -> InfiniteQueryResult[T, TParam]:
        """Descriptor access: class access returns the property itself,
        instance access returns the (lazily built) query result."""
        if obj is None:
            return self  # pyright: ignore[reportReturnType]
        return self.initialize(obj, self.name)
|
|
1069
|
+
|
|
1070
|
+
|
|
1071
|
+
# Overload: bare application — @infinite_query used directly on the method.
@overload
def infinite_query(
    fn: Callable[[TState, TParam], Awaitable[T]],
    *,
    initial_page_param: TParam,
    max_pages: int = 0,
    stale_time: float = 0.0,
    gc_time: float | None = 300.0,
    refetch_interval: float | None = None,
    keep_previous_data: bool = False,
    retries: int = 3,
    retry_delay: float | None = None,
    initial_data_updated_at: float | dt.datetime | None = None,
    enabled: bool = True,
    fetch_on_mount: bool = True,
    key: QueryKey | None = None,
) -> InfiniteQueryProperty[T, TParam, TState]: ...
|
|
1088
|
+
|
|
1089
|
+
|
|
1090
|
+
# Overload: parameterized form — @infinite_query(...) returns a decorator.
# NOTE(review): the decorated function's page-param is typed Any here rather
# than TParam (unlike the bare overload) — possibly to ease inference; confirm.
@overload
def infinite_query(
    fn: None = None,
    *,
    initial_page_param: TParam,
    max_pages: int = 0,
    stale_time: float = 0.0,
    gc_time: float | None = 300.0,
    refetch_interval: float | None = None,
    keep_previous_data: bool = False,
    retries: int = 3,
    retry_delay: float | None = None,
    initial_data_updated_at: float | dt.datetime | None = None,
    enabled: bool = True,
    fetch_on_mount: bool = True,
    key: QueryKey | None = None,
) -> Callable[
    [Callable[[TState, Any], Awaitable[T]]],
    InfiniteQueryProperty[T, TParam, TState],
]: ...
|
|
1110
|
+
|
|
1111
|
+
|
|
1112
|
+
def infinite_query(
    fn: Callable[[TState, TParam], Awaitable[T]] | None = None,
    *,
    initial_page_param: TParam,
    max_pages: int = 0,
    stale_time: float = 0.0,
    gc_time: float | None = 300.0,
    refetch_interval: float | None = None,
    keep_previous_data: bool = False,
    retries: int = 3,
    retry_delay: float | None = None,
    initial_data_updated_at: float | dt.datetime | None = None,
    enabled: bool = True,
    fetch_on_mount: bool = True,
    key: QueryKey | None = None,
):
    """Declare an infinite (paginated) query on a state class.

    Usable bare (``@infinite_query`` — only when all keyword options have
    acceptable defaults) or parameterized (``@infinite_query(...)``). The
    decorated method must have the signature ``(self, page_param)``; anything
    else raises TypeError. Returns an InfiniteQueryProperty descriptor.
    """

    def decorator(
        func: Callable[[TState, TParam], Awaitable[T]], /
    ) -> InfiniteQueryProperty[T, TParam, TState]:
        # Validate the decorated method's shape before building the property.
        signature = inspect.signature(func)
        parameters = list(signature.parameters.values())
        if len(parameters) != 2 or parameters[0].name != "self":
            raise TypeError(
                "@infinite_query must be applied to a state method with signature (self, page_param)"
            )

        # Normalize optional settings to the concrete defaults the property expects.
        resolved_gc_time = gc_time if gc_time is not None else 300.0
        resolved_retry_delay = (
            RETRY_DELAY_DEFAULT if retry_delay is None else retry_delay
        )
        return InfiniteQueryProperty(
            func.__name__,
            func,
            initial_page_param=initial_page_param,
            max_pages=max_pages,
            stale_time=stale_time,
            gc_time=resolved_gc_time,
            refetch_interval=refetch_interval,
            keep_previous_data=keep_previous_data,
            retries=retries,
            retry_delay=resolved_retry_delay,
            initial_data_updated_at=initial_data_updated_at,
            enabled=enabled,
            fetch_on_mount=fetch_on_mount,
            key=key,
        )

    # Bare usage passed the function directly; parameterized usage returns
    # the decorator for later application.
    if fn:
        return decorator(fn)
    return decorator
|