reflex 0.7.9a1__py3-none-any.whl → 0.7.10a1__py3-none-any.whl

This diff shows the content of publicly available package versions as they appear in their respective public registries and is provided for informational purposes only.

Potentially problematic release: this version of reflex might be problematic; see the registry page for details.

@@ -0,0 +1,858 @@
1
+ """State manager for managing client states."""
2
+
3
+ import asyncio
4
+ import contextlib
5
+ import dataclasses
6
+ import functools
7
+ import time
8
+ import uuid
9
+ from abc import ABC, abstractmethod
10
+ from collections.abc import AsyncIterator
11
+ from hashlib import md5
12
+ from pathlib import Path
13
+
14
+ from redis import ResponseError
15
+ from redis.asyncio import Redis
16
+ from redis.asyncio.client import PubSub
17
+ from typing_extensions import override
18
+
19
+ from reflex import constants
20
+ from reflex.config import environment, get_config
21
+ from reflex.state import BaseState, _split_substate_key, _substate_key
22
+ from reflex.utils import console, path_ops, prerequisites
23
+ from reflex.utils.exceptions import (
24
+ InvalidLockWarningThresholdError,
25
+ InvalidStateManagerModeError,
26
+ LockExpiredError,
27
+ StateSchemaMismatchError,
28
+ )
29
+
30
+
31
+ @dataclasses.dataclass
32
+ class StateManager(ABC):
33
+ """A class to manage many client states."""
34
+
35
+ # The state class to use.
36
+ state: type[BaseState]
37
+
38
+ @classmethod
39
+ def create(cls, state: type[BaseState]):
40
+ """Create a new state manager.
41
+
42
+ Args:
43
+ state: The state class to use.
44
+
45
+ Raises:
46
+ InvalidStateManagerModeError: If the state manager mode is invalid.
47
+
48
+ Returns:
49
+ The state manager (either disk, memory or redis).
50
+ """
51
+ config = get_config()
52
+ if prerequisites.parse_redis_url() is not None:
53
+ config.state_manager_mode = constants.StateManagerMode.REDIS
54
+ if config.state_manager_mode == constants.StateManagerMode.MEMORY:
55
+ return StateManagerMemory(state=state)
56
+ if config.state_manager_mode == constants.StateManagerMode.DISK:
57
+ return StateManagerDisk(state=state)
58
+ if config.state_manager_mode == constants.StateManagerMode.REDIS:
59
+ redis = prerequisites.get_redis()
60
+ if redis is not None:
61
+ # make sure expiration values are obtained only from the config object on creation
62
+ return StateManagerRedis(
63
+ state=state,
64
+ redis=redis,
65
+ token_expiration=config.redis_token_expiration,
66
+ lock_expiration=config.redis_lock_expiration,
67
+ lock_warning_threshold=config.redis_lock_warning_threshold,
68
+ )
69
+ raise InvalidStateManagerModeError(
70
+ f"Expected one of: DISK, MEMORY, REDIS, got {config.state_manager_mode}"
71
+ )
72
+
73
+ @abstractmethod
74
+ async def get_state(self, token: str) -> BaseState:
75
+ """Get the state for a token.
76
+
77
+ Args:
78
+ token: The token to get the state for.
79
+
80
+ Returns:
81
+ The state for the token.
82
+ """
83
+ pass
84
+
85
+ @abstractmethod
86
+ async def set_state(self, token: str, state: BaseState):
87
+ """Set the state for a token.
88
+
89
+ Args:
90
+ token: The token to set the state for.
91
+ state: The state to set.
92
+ """
93
+ pass
94
+
95
+ @abstractmethod
96
+ @contextlib.asynccontextmanager
97
+ async def modify_state(self, token: str) -> AsyncIterator[BaseState]:
98
+ """Modify the state for a token while holding exclusive lock.
99
+
100
+ Args:
101
+ token: The token to modify the state for.
102
+
103
+ Yields:
104
+ The state for the token.
105
+ """
106
+ yield self.state()
107
+
108
+
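For orientation, here is a minimal sketch of how the factory above might be exercised. The `reflex.istate.manager` import path is an assumption for illustration; only `StateManager.create()` and its mode selection come from this diff.

```python
import reflex as rx

# Assumed import path for the module added in this diff.
from reflex.istate.manager import StateManager

# With no redis URL configured, create() falls back to the configured mode:
# MEMORY -> StateManagerMemory, DISK -> StateManagerDisk, REDIS -> StateManagerRedis.
manager = StateManager.create(state=rx.State)
print(type(manager).__name__)  # e.g. "StateManagerDisk", depending on rxconfig
```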
109
+ @dataclasses.dataclass
110
+ class StateManagerMemory(StateManager):
111
+ """A state manager that stores states in memory."""
112
+
113
+ # The mapping of client ids to states.
114
+ states: dict[str, BaseState] = dataclasses.field(default_factory=dict)
115
+
116
+ # The mutex ensures the dict of mutexes is updated exclusively
117
+ _state_manager_lock: asyncio.Lock = dataclasses.field(default=asyncio.Lock())
118
+
119
+ # The dict of mutexes for each client
120
+ _states_locks: dict[str, asyncio.Lock] = dataclasses.field(
121
+ default_factory=dict, init=False
122
+ )
123
+
124
+ @override
125
+ async def get_state(self, token: str) -> BaseState:
126
+ """Get the state for a token.
127
+
128
+ Args:
129
+ token: The token to get the state for.
130
+
131
+ Returns:
132
+ The state for the token.
133
+ """
134
+ # Memory state manager ignores the substate suffix and always returns the top-level state.
135
+ token = _split_substate_key(token)[0]
136
+ if token not in self.states:
137
+ self.states[token] = self.state(_reflex_internal_init=True)
138
+ return self.states[token]
139
+
140
+ @override
141
+ async def set_state(self, token: str, state: BaseState):
142
+ """Set the state for a token.
143
+
144
+ Args:
145
+ token: The token to set the state for.
146
+ state: The state to set.
147
+ """
148
+ pass
149
+
150
+ @override
151
+ @contextlib.asynccontextmanager
152
+ async def modify_state(self, token: str) -> AsyncIterator[BaseState]:
153
+ """Modify the state for a token while holding exclusive lock.
154
+
155
+ Args:
156
+ token: The token to modify the state for.
157
+
158
+ Yields:
159
+ The state for the token.
160
+ """
161
+ # Memory state manager ignores the substate suffix and always returns the top-level state.
162
+ token = _split_substate_key(token)[0]
163
+ if token not in self._states_locks:
164
+ async with self._state_manager_lock:
165
+ if token not in self._states_locks:
166
+ self._states_locks[token] = asyncio.Lock()
167
+
168
+ async with self._states_locks[token]:
169
+ state = await self.get_state(token)
170
+ yield state
171
+ await self.set_state(token, state)
172
+
173
+
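A sketch of driving the context manager above by hand. The client token, the `CounterState` class, and the `reflex.istate.manager` import path are hypothetical; `_substate_key` and `BaseState.get_state` are used exactly as they appear elsewhere in this file.

```python
import asyncio

import reflex as rx
from reflex.istate.manager import StateManagerMemory  # assumed module path
from reflex.state import _substate_key


class CounterState(rx.State):  # hypothetical state class
    count: int = 0


async def bump(manager: StateManagerMemory, client_token: str) -> int:
    # modify_state() serializes concurrent writers for the same client token.
    token = _substate_key(client_token, CounterState)
    async with manager.modify_state(token) as root:
        substate = await root.get_state(CounterState)
        substate.count += 1
        return substate.count


async def main():
    manager = StateManagerMemory(state=rx.State)
    print(await bump(manager, "client-123"))  # hypothetical client token


asyncio.run(main())
```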
174
+ def _default_token_expiration() -> int:
175
+ """Get the default token expiration time.
176
+
177
+ Returns:
178
+ The default token expiration time.
179
+ """
180
+ return get_config().redis_token_expiration
181
+
182
+
183
+ def reset_disk_state_manager():
184
+ """Reset the disk state manager."""
185
+ states_directory = prerequisites.get_states_dir()
186
+ if states_directory.exists():
187
+ for path in states_directory.iterdir():
188
+ path.unlink()
189
+
190
+
191
+ @dataclasses.dataclass
192
+ class StateManagerDisk(StateManager):
193
+ """A state manager that stores states in memory."""
194
+
195
+ # The mapping of client ids to states.
196
+ states: dict[str, BaseState] = dataclasses.field(default_factory=dict)
197
+
198
+ # The mutex ensures the dict of mutexes is updated exclusively
199
+ _state_manager_lock: asyncio.Lock = dataclasses.field(default=asyncio.Lock())
200
+
201
+ # The dict of mutexes for each client
202
+ _states_locks: dict[str, asyncio.Lock] = dataclasses.field(
203
+ default_factory=dict,
204
+ init=False,
205
+ )
206
+
207
+ # The token expiration time (s).
208
+ token_expiration: int = dataclasses.field(default_factory=_default_token_expiration)
209
+
210
+ def __post_init__(self):
211
+ """Create a new state manager."""
212
+ path_ops.mkdir(self.states_directory)
213
+
214
+ self._purge_expired_states()
215
+
216
+ @functools.cached_property
217
+ def states_directory(self) -> Path:
218
+ """Get the states directory.
219
+
220
+ Returns:
221
+ The states directory.
222
+ """
223
+ return prerequisites.get_states_dir()
224
+
225
+ def _purge_expired_states(self):
226
+ """Purge expired states from the disk."""
227
+ import time
228
+
229
+ for path in path_ops.ls(self.states_directory):
230
+ # check path is a pickle file
231
+ if path.suffix != ".pkl":
232
+ continue
233
+
234
+ # load last edited field from file
235
+ last_edited = path.stat().st_mtime
236
+
237
+ # check if the file is older than the token expiration time
238
+ if time.time() - last_edited > self.token_expiration:
239
+ # remove the file
240
+ path.unlink()
241
+
242
+ def token_path(self, token: str) -> Path:
243
+ """Get the path for a token.
244
+
245
+ Args:
246
+ token: The token to get the path for.
247
+
248
+ Returns:
249
+ The path for the token.
250
+ """
251
+ return (
252
+ self.states_directory / f"{md5(token.encode()).hexdigest()}.pkl"
253
+ ).absolute()
254
+
255
+ async def load_state(self, token: str) -> BaseState | None:
256
+ """Load a state object based on the provided token.
257
+
258
+ Args:
259
+ token: The token used to identify the state object.
260
+
261
+ Returns:
262
+ The loaded state object or None.
263
+ """
264
+ token_path = self.token_path(token)
265
+
266
+ if token_path.exists():
267
+ try:
268
+ with token_path.open(mode="rb") as file:
269
+ return BaseState._deserialize(fp=file)
270
+ except Exception:
271
+ pass
272
+
273
+ async def populate_substates(
274
+ self, client_token: str, state: BaseState, root_state: BaseState
275
+ ):
276
+ """Populate the substates of a state object.
277
+
278
+ Args:
279
+ client_token: The client token.
280
+ state: The state object to populate.
281
+ root_state: The root state object.
282
+ """
283
+ for substate in state.get_substates():
284
+ substate_token = _substate_key(client_token, substate)
285
+
286
+ fresh_instance = await root_state.get_state(substate)
287
+ instance = await self.load_state(substate_token)
288
+ if instance is not None:
289
+ # Ensure all substates exist, even if they weren't serialized previously.
290
+ instance.substates = fresh_instance.substates
291
+ else:
292
+ instance = fresh_instance
293
+ state.substates[substate.get_name()] = instance
294
+ instance.parent_state = state
295
+
296
+ await self.populate_substates(client_token, instance, root_state)
297
+
298
+ @override
299
+ async def get_state(
300
+ self,
301
+ token: str,
302
+ ) -> BaseState:
303
+ """Get the state for a token.
304
+
305
+ Args:
306
+ token: The token to get the state for.
307
+
308
+ Returns:
309
+ The state for the token.
310
+ """
311
+ client_token = _split_substate_key(token)[0]
312
+ root_state = self.states.get(client_token)
313
+ if root_state is not None:
314
+ # Retrieved state from memory.
315
+ return root_state
316
+
317
+ # Deserialize root state from disk.
318
+ root_state = await self.load_state(_substate_key(client_token, self.state))
319
+ # Create a new root state tree with all substates instantiated.
320
+ fresh_root_state = self.state(_reflex_internal_init=True)
321
+ if root_state is None:
322
+ root_state = fresh_root_state
323
+ else:
324
+ # Ensure all substates exist, even if they were not serialized previously.
325
+ root_state.substates = fresh_root_state.substates
326
+ self.states[client_token] = root_state
327
+ await self.populate_substates(client_token, root_state, root_state)
328
+ return root_state
329
+
330
+ async def set_state_for_substate(self, client_token: str, substate: BaseState):
331
+ """Set the state for a substate.
332
+
333
+ Args:
334
+ client_token: The client token.
335
+ substate: The substate to set.
336
+ """
337
+ substate_token = _substate_key(client_token, substate)
338
+
339
+ if substate._get_was_touched():
340
+ substate._was_touched = False  # Reset the touched flag before persisting this substate.
341
+ pickle_state = substate._serialize()
342
+ if pickle_state:
343
+ if not self.states_directory.exists():
344
+ self.states_directory.mkdir(parents=True, exist_ok=True)
345
+ self.token_path(substate_token).write_bytes(pickle_state)
346
+
347
+ for substate_substate in substate.substates.values():
348
+ await self.set_state_for_substate(client_token, substate_substate)
349
+
350
+ @override
351
+ async def set_state(self, token: str, state: BaseState):
352
+ """Set the state for a token.
353
+
354
+ Args:
355
+ token: The token to set the state for.
356
+ state: The state to set.
357
+ """
358
+ client_token, substate = _split_substate_key(token)
359
+ await self.set_state_for_substate(client_token, state)
360
+
361
+ @override
362
+ @contextlib.asynccontextmanager
363
+ async def modify_state(self, token: str) -> AsyncIterator[BaseState]:
364
+ """Modify the state for a token while holding exclusive lock.
365
+
366
+ Args:
367
+ token: The token to modify the state for.
368
+
369
+ Yields:
370
+ The state for the token.
371
+ """
372
+ # Lock on the client token; the substate suffix is not needed to serialize access.
373
+ client_token, substate = _split_substate_key(token)
374
+ if client_token not in self._states_locks:
375
+ async with self._state_manager_lock:
376
+ if client_token not in self._states_locks:
377
+ self._states_locks[client_token] = asyncio.Lock()
378
+
379
+ async with self._states_locks[client_token]:
380
+ state = await self.get_state(token)
381
+ yield state
382
+ await self.set_state(token, state)
383
+
384
+
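The disk layout implied by the class above: one pickle per substate token, named by the md5 hex digest of the full token, and purged by modification time at startup. A standalone sketch of those two helpers (directory and token values are hypothetical):

```python
import time
from hashlib import md5
from pathlib import Path


def token_path(states_directory: Path, token: str) -> Path:
    # Mirrors StateManagerDisk.token_path: "<md5 of '{client_token}_{state_full_name}'>.pkl".
    return (states_directory / f"{md5(token.encode()).hexdigest()}.pkl").absolute()


def purge_expired(states_directory: Path, token_expiration_s: int) -> None:
    # Mirrors _purge_expired_states: pickles whose mtime is older than the token
    # expiration are treated as abandoned client sessions and removed.
    for path in states_directory.glob("*.pkl"):
        if time.time() - path.stat().st_mtime > token_expiration_s:
            path.unlink()


print(token_path(Path(".states"), "client-123_state.counter_state"))  # hypothetical token
```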
385
+ def _default_lock_expiration() -> int:
386
+ """Get the default lock expiration time.
387
+
388
+ Returns:
389
+ The default lock expiration time.
390
+ """
391
+ return get_config().redis_lock_expiration
392
+
393
+
394
+ def _default_lock_warning_threshold() -> int:
395
+ """Get the default lock warning threshold.
396
+
397
+ Returns:
398
+ The default lock warning threshold.
399
+ """
400
+ return get_config().redis_lock_warning_threshold
401
+
402
+
403
+ @dataclasses.dataclass
404
+ class StateManagerRedis(StateManager):
405
+ """A state manager that stores states in redis."""
406
+
407
+ # The redis client to use.
408
+ redis: Redis
409
+
410
+ # The token expiration time (s).
411
+ token_expiration: int = dataclasses.field(default_factory=_default_token_expiration)
412
+
413
+ # The maximum time to hold a lock (ms).
414
+ lock_expiration: int = dataclasses.field(default_factory=_default_lock_expiration)
415
+
416
+ # The maximum time to hold a lock (ms) before warning.
417
+ lock_warning_threshold: int = dataclasses.field(
418
+ default_factory=_default_lock_warning_threshold
419
+ )
420
+
421
+ # The keyspace subscription string when redis is waiting for lock to be released.
422
+ _redis_notify_keyspace_events: str = dataclasses.field(
423
+ default="K" # Enable keyspace notifications (target a particular key)
424
+ "g" # For generic commands (DEL, EXPIRE, etc)
425
+ "x" # For expired events
426
+ "e" # For evicted events (i.e. maxmemory exceeded)
427
+ )
428
+
429
+ # These events indicate that a lock is no longer held.
430
+ _redis_keyspace_lock_release_events: set[bytes] = dataclasses.field(
431
+ default_factory=lambda: {
432
+ b"del",
433
+ b"expire",
434
+ b"expired",
435
+ b"evicted",
436
+ }
437
+ )
438
+
439
+ # Whether keyspace notifications have been enabled.
440
+ _redis_notify_keyspace_events_enabled: bool = dataclasses.field(default=False)
441
+
442
+ # The logical database number used by the redis client.
443
+ _redis_db: int = dataclasses.field(default=0)
444
+
445
+ def __post_init__(self):
446
+ """Validate the lock warning threshold.
447
+
448
+ Raises:
449
+ InvalidLockWarningThresholdError: If the lock warning threshold is invalid.
450
+ """
451
+ if self.lock_warning_threshold >= (lock_expiration := self.lock_expiration):
452
+ raise InvalidLockWarningThresholdError(
453
+ f"The lock warning threshold({self.lock_warning_threshold}) must be less than the lock expiration time({lock_expiration})."
454
+ )
455
+
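Both lock knobs are in milliseconds, and the check above requires the warning threshold to stay strictly below the lock expiration. A hedged configuration sketch (the numbers are illustrative, not the library defaults):

```python
import reflex as rx

config = rx.Config(
    app_name="my_app",
    redis_url="redis://localhost:6379",   # presence of a redis URL selects StateManagerRedis
    redis_lock_expiration=15_000,         # ms a single event handler may hold the state lock
    redis_lock_warning_threshold=10_000,  # ms before a "held too long" warning; must be < expiration
    redis_token_expiration=3600,          # s before an idle client's state expires
)
```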
456
+ def _get_required_state_classes(
457
+ self,
458
+ target_state_cls: type[BaseState],
459
+ subclasses: bool = False,
460
+ required_state_classes: set[type[BaseState]] | None = None,
461
+ ) -> set[type[BaseState]]:
462
+ """Recursively determine which states are required to fetch the target state.
463
+
464
+ This will always include potentially dirty substates that depend on vars
465
+ in the target_state_cls.
466
+
467
+ Args:
468
+ target_state_cls: The target state class being fetched.
469
+ subclasses: Whether to include subclasses of the target state.
470
+ required_state_classes: Recursive argument tracking state classes that have already been seen.
471
+
472
+ Returns:
473
+ The set of state classes required to fetch the target state.
474
+ """
475
+ if required_state_classes is None:
476
+ required_state_classes = set()
477
+ # Get the substates if requested.
478
+ if subclasses:
479
+ for substate in target_state_cls.get_substates():
480
+ self._get_required_state_classes(
481
+ substate,
482
+ subclasses=True,
483
+ required_state_classes=required_state_classes,
484
+ )
485
+ if target_state_cls in required_state_classes:
486
+ return required_state_classes
487
+ required_state_classes.add(target_state_cls)
488
+
489
+ # Get dependent substates.
490
+ for pd_substates in target_state_cls._get_potentially_dirty_states():
491
+ self._get_required_state_classes(
492
+ pd_substates,
493
+ subclasses=False,
494
+ required_state_classes=required_state_classes,
495
+ )
496
+
497
+ # Get the parent state if it exists.
498
+ if parent_state := target_state_cls.get_parent_state():
499
+ self._get_required_state_classes(
500
+ parent_state,
501
+ subclasses=False,
502
+ required_state_classes=required_state_classes,
503
+ )
504
+ return required_state_classes
505
+
506
+ def _get_populated_states(
507
+ self,
508
+ target_state: BaseState,
509
+ populated_states: dict[str, BaseState] | None = None,
510
+ ) -> dict[str, BaseState]:
511
+ """Recursively determine which states from target_state are already fetched.
512
+
513
+ Args:
514
+ target_state: The state to check for populated states.
515
+ populated_states: Recursive argument tracking states seen in previous calls.
516
+
517
+ Returns:
518
+ A dictionary of state full name to state instance.
519
+ """
520
+ if populated_states is None:
521
+ populated_states = {}
522
+ if target_state.get_full_name() in populated_states:
523
+ return populated_states
524
+ populated_states[target_state.get_full_name()] = target_state
525
+ for substate in target_state.substates.values():
526
+ self._get_populated_states(substate, populated_states=populated_states)
527
+ if target_state.parent_state is not None:
528
+ self._get_populated_states(
529
+ target_state.parent_state, populated_states=populated_states
530
+ )
531
+ return populated_states
532
+
533
+ @override
534
+ async def get_state(
535
+ self,
536
+ token: str,
537
+ top_level: bool = True,
538
+ for_state_instance: BaseState | None = None,
539
+ ) -> BaseState:
540
+ """Get the state for a token.
541
+
542
+ Args:
543
+ token: The token to get the state for.
544
+ top_level: If true, return an instance of the top-level state (self.state).
545
+ for_state_instance: If provided, attach the requested states to this existing state tree.
546
+
547
+ Returns:
548
+ The state for the token.
549
+
550
+ Raises:
551
+ RuntimeError: when the state_cls is not specified in the token, or when the parent state for a
552
+ requested state was not fetched.
553
+ """
554
+ # Split the actual token from the fully qualified substate name.
555
+ token, state_path = _split_substate_key(token)
556
+ if state_path:
557
+ # Get the State class associated with the given path.
558
+ state_cls = self.state.get_class_substate(state_path)
559
+ else:
560
+ raise RuntimeError(
561
+ f"StateManagerRedis requires token to be specified in the form of {{token}}_{{state_full_name}}, but got {token}"
562
+ )
563
+
564
+ # Determine which states we already have.
565
+ flat_state_tree: dict[str, BaseState] = (
566
+ self._get_populated_states(for_state_instance) if for_state_instance else {}
567
+ )
568
+
569
+ # Determine which states from the tree need to be fetched.
570
+ required_state_classes = sorted(
571
+ self._get_required_state_classes(state_cls, subclasses=True)
572
+ - {type(s) for s in flat_state_tree.values()},
573
+ key=lambda x: x.get_full_name(),
574
+ )
575
+
576
+ redis_pipeline = self.redis.pipeline()
577
+ for state_cls in required_state_classes:
578
+ redis_pipeline.get(_substate_key(token, state_cls))
579
+
580
+ for state_cls, redis_state in zip(
581
+ required_state_classes,
582
+ await redis_pipeline.execute(),
583
+ strict=False,
584
+ ):
585
+ state = None
586
+
587
+ if redis_state is not None:
588
+ # Deserialize the substate.
589
+ with contextlib.suppress(StateSchemaMismatchError):
590
+ state = BaseState._deserialize(data=redis_state)
591
+ if state is None:
592
+ # Key didn't exist or schema mismatch so create a new instance for this token.
593
+ state = state_cls(
594
+ init_substates=False,
595
+ _reflex_internal_init=True,
596
+ )
597
+ flat_state_tree[state.get_full_name()] = state
598
+ if state.get_parent_state() is not None:
599
+ parent_state_name, _dot, state_name = state.get_full_name().rpartition(
600
+ "."
601
+ )
602
+ parent_state = flat_state_tree.get(parent_state_name)
603
+ if parent_state is None:
604
+ raise RuntimeError(
605
+ f"Parent state for {state.get_full_name()} was not found "
606
+ "in the state tree, but should have already been fetched. "
607
+ "This is a bug",
608
+ )
609
+ parent_state.substates[state_name] = state
610
+ state.parent_state = parent_state
611
+
612
+ # To retain compatibility with previous implementation, by default, we return
613
+ # the top-level state which should always be fetched or already cached.
614
+ if top_level:
615
+ return flat_state_tree[self.state.get_full_name()]
616
+ return flat_state_tree[state_cls.get_full_name()]
617
+
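The method above batches every required substate GET into a single pipeline round trip. A stripped-down sketch of that pattern with redis-py's asyncio client (the key names and local Redis URL are assumptions):

```python
import asyncio

from redis.asyncio import Redis


async def fetch_many(keys: list[str]) -> list[bytes | None]:
    redis = Redis.from_url("redis://localhost:6379")
    try:
        pipe = redis.pipeline()
        for key in keys:
            pipe.get(key)            # queued locally, nothing sent yet
        return await pipe.execute()  # one round trip returns all values in order
    finally:
        await redis.aclose()


# Hypothetical substate keys in the "{client_token}_{state_full_name}" format.
print(asyncio.run(fetch_many(["client-123_state", "client-123_state.counter_state"])))
```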
618
+ @override
619
+ async def set_state(
620
+ self,
621
+ token: str,
622
+ state: BaseState,
623
+ lock_id: bytes | None = None,
624
+ ):
625
+ """Set the state for a token.
626
+
627
+ Args:
628
+ token: The token to set the state for.
629
+ state: The state to set.
630
+ lock_id: If provided, the lock_key must be set to this value to set the state.
631
+
632
+ Raises:
633
+ LockExpiredError: If lock_id is provided and the lock for the token is not held by that ID.
634
+ RuntimeError: If the state instance doesn't match the state name in the token.
635
+ """
636
+ # Check that we're holding the lock.
637
+ if (
638
+ lock_id is not None
639
+ and await self.redis.get(self._lock_key(token)) != lock_id
640
+ ):
641
+ raise LockExpiredError(
642
+ f"Lock expired for token {token} while processing. Consider increasing "
643
+ f"`app.state_manager.lock_expiration` (currently {self.lock_expiration}) "
644
+ "or use `@rx.event(background=True)` decorator for long-running tasks."
645
+ )
646
+ elif lock_id is not None:
647
+ time_taken = self.lock_expiration / 1000 - (
648
+ await self.redis.ttl(self._lock_key(token))
649
+ )
650
+ if time_taken > self.lock_warning_threshold / 1000:
651
+ console.warn(
652
+ f"Lock for token {token} was held too long {time_taken=}s, "
653
+ f"use `@rx.event(background=True)` decorator for long-running tasks.",
654
+ dedupe=True,
655
+ )
656
+
657
+ client_token, substate_name = _split_substate_key(token)
658
+ # If the substate name on the token doesn't match the instance name, it cannot have a parent.
659
+ if state.parent_state is not None and state.get_full_name() != substate_name:
660
+ raise RuntimeError(
661
+ f"Cannot `set_state` with mismatching token {token} and substate {state.get_full_name()}."
662
+ )
663
+
664
+ # Recursively set_state on all known substates.
665
+ tasks = [
666
+ asyncio.create_task(
667
+ self.set_state(
668
+ _substate_key(client_token, substate),
669
+ substate,
670
+ lock_id,
671
+ )
672
+ )
673
+ for substate in state.substates.values()
674
+ ]
675
+ # Persist only the given state (parents or substates are excluded by BaseState.__getstate__).
676
+ if state._get_was_touched():
677
+ pickle_state = state._serialize()
678
+ if pickle_state:
679
+ await self.redis.set(
680
+ _substate_key(client_token, state),
681
+ pickle_state,
682
+ ex=self.token_expiration,
683
+ )
684
+
685
+ # Wait for substates to be persisted.
686
+ for t in tasks:
687
+ await t
688
+
689
+ @override
690
+ @contextlib.asynccontextmanager
691
+ async def modify_state(self, token: str) -> AsyncIterator[BaseState]:
692
+ """Modify the state for a token while holding exclusive lock.
693
+
694
+ Args:
695
+ token: The token to modify the state for.
696
+
697
+ Yields:
698
+ The state for the token.
699
+ """
700
+ async with self._lock(token) as lock_id:
701
+ state = await self.get_state(token)
702
+ yield state
703
+ await self.set_state(token, state, lock_id)
704
+
705
+ @staticmethod
706
+ def _lock_key(token: str) -> bytes:
707
+ """Get the redis key for a token's lock.
708
+
709
+ Args:
710
+ token: The token to get the lock key for.
711
+
712
+ Returns:
713
+ The redis lock key for the token.
714
+ """
715
+ # All substates share the same lock domain, so ignore any substate path suffix.
716
+ client_token = _split_substate_key(token)[0]
717
+ return f"{client_token}_lock".encode()
718
+
719
+ async def _try_get_lock(self, lock_key: bytes, lock_id: bytes) -> bool | None:
720
+ """Try to get a redis lock for a token.
721
+
722
+ Args:
723
+ lock_key: The redis key for the lock.
724
+ lock_id: The ID of the lock.
725
+
726
+ Returns:
727
+ True if the lock was obtained.
728
+ """
729
+ return await self.redis.set(
730
+ lock_key,
731
+ lock_id,
732
+ px=self.lock_expiration,
733
+ nx=True, # only set if it doesn't exist
734
+ )
735
+
736
+ async def _get_pubsub_message(
737
+ self, pubsub: PubSub, timeout: float | None = None
738
+ ) -> None:
739
+ """Get lock release events from the pubsub.
740
+
741
+ Args:
742
+ pubsub: The pubsub to get a message from.
743
+ timeout: Remaining time to wait for a message.
744
+
745
+ Returns:
746
+ None, once a lock release event is received or the timeout elapses.
747
+ """
748
+ if timeout is None:
749
+ timeout = self.lock_expiration / 1000.0
750
+
751
+ started = time.time()
752
+ message = await pubsub.get_message(
753
+ ignore_subscribe_messages=True,
754
+ timeout=timeout,
755
+ )
756
+ if (
757
+ message is None
758
+ or message["data"] not in self._redis_keyspace_lock_release_events
759
+ ):
760
+ remaining = timeout - (time.time() - started)
761
+ if remaining <= 0:
762
+ return
763
+ await self._get_pubsub_message(pubsub, timeout=remaining)
764
+
765
+ async def _enable_keyspace_notifications(self):
766
+ """Enable keyspace notifications for the redis server.
767
+
768
+ Raises:
769
+ ResponseError: when the keyspace config cannot be set.
770
+ """
771
+ if self._redis_notify_keyspace_events_enabled:
772
+ return
773
+ # Find out which logical database index is being used.
774
+ self._redis_db = self.redis.get_connection_kwargs().get("db", self._redis_db)
775
+
776
+ try:
777
+ await self.redis.config_set(
778
+ "notify-keyspace-events",
779
+ self._redis_notify_keyspace_events,
780
+ )
781
+ except ResponseError:
782
+ # Some redis servers only allow out-of-band configuration, so ignore errors here.
783
+ if not environment.REFLEX_IGNORE_REDIS_CONFIG_ERROR.get():
784
+ raise
785
+ self._redis_notify_keyspace_events_enabled = True
786
+
787
+ async def _wait_lock(self, lock_key: bytes, lock_id: bytes) -> None:
788
+ """Wait for a redis lock to be released via pubsub.
789
+
790
+ Coroutine will not return until the lock is obtained.
791
+
792
+ Args:
793
+ lock_key: The redis key for the lock.
794
+ lock_id: The ID of the lock.
795
+ """
796
+ # Enable keyspace notifications for the lock key, so we know when it is available.
797
+ await self._enable_keyspace_notifications()
798
+ lock_key_channel = f"__keyspace@{self._redis_db}__:{lock_key.decode()}"
799
+ async with self.redis.pubsub() as pubsub:
800
+ await pubsub.psubscribe(lock_key_channel)
801
+ # wait for the lock to be released
802
+ while True:
803
+ # fast path
804
+ if await self._try_get_lock(lock_key, lock_id):
805
+ return
806
+ # wait for lock events
807
+ await self._get_pubsub_message(pubsub)
808
+
809
+ @contextlib.asynccontextmanager
810
+ async def _lock(self, token: str):
811
+ """Obtain a redis lock for a token.
812
+
813
+ Args:
814
+ token: The token to obtain a lock for.
815
+
816
+ Yields:
817
+ The ID of the lock (to be passed to set_state).
818
+
819
+ Raises:
820
+ LockExpiredError: If the lock has expired while processing the event.
821
+ """
822
+ lock_key = self._lock_key(token)
823
+ lock_id = uuid.uuid4().hex.encode()
824
+
825
+ if not await self._try_get_lock(lock_key, lock_id):
826
+ # Missed the fast-path to get lock, subscribe for lock delete/expire events
827
+ await self._wait_lock(lock_key, lock_id)
828
+ state_is_locked = True
829
+
830
+ try:
831
+ yield lock_id
832
+ except LockExpiredError:
833
+ state_is_locked = False
834
+ raise
835
+ finally:
836
+ if state_is_locked:
837
+ # only delete our lock
838
+ await self.redis.delete(lock_key)
839
+
840
+ async def close(self):
841
+ """Explicitly close the redis connection and connection_pool.
842
+
843
+ It is necessary in testing scenarios to close between asyncio test cases
844
+ to avoid having lingering redis connections associated with event loops
845
+ that will be closed (each test case uses its own event loop).
846
+
847
+ Note: Connections will be automatically reopened when needed.
848
+ """
849
+ await self.redis.aclose(close_connection_pool=True)
850
+
851
+
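The locking protocol in this class is essentially `SET key value NX PX <ttl>` for acquisition, with keyspace notifications waking waiters when the holder deletes or expires the key. A simplified acquire-only sketch (key naming mirrors `_lock_key`; the Redis URL is an assumption):

```python
import asyncio
import uuid

from redis.asyncio import Redis


async def try_acquire(redis: Redis, client_token: str, lock_expiration_ms: int) -> bytes | None:
    # Mirrors _try_get_lock: NX means "only set if the key does not exist", PX attaches a TTL
    # so a crashed worker cannot hold the lock forever; the random id identifies the holder.
    lock_key = f"{client_token}_lock".encode()
    lock_id = uuid.uuid4().hex.encode()
    if await redis.set(lock_key, lock_id, nx=True, px=lock_expiration_ms):
        return lock_id
    return None  # held by someone else; the real manager then waits for del/expire events


async def main():
    redis = Redis.from_url("redis://localhost:6379")
    lock_id = await try_acquire(redis, "client-123", 10_000)
    print("acquired" if lock_id else "busy")
    if lock_id:
        await redis.delete(b"client-123_lock")  # release, which also notifies any waiters
    await redis.aclose()


asyncio.run(main())
```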
852
+ def get_state_manager() -> StateManager:
853
+ """Get the state manager for the app that is currently running.
854
+
855
+ Returns:
856
+ The state manager.
857
+ """
858
+ return prerequisites.get_and_validate_app().app.state_manager
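Finally, application code normally reaches whichever manager the running app created through this accessor rather than constructing one directly. A hedged usage sketch (the import path and token value are assumptions, and it only works inside a running Reflex app):

```python
# Assumed import path for this module; get_and_validate_app() requires a running app.
from reflex.istate.manager import get_state_manager


async def inspect_state(token: str):
    manager = get_state_manager()
    # Hold the per-client lock while mutating; token format is "{client_token}_{state_full_name}".
    async with manager.modify_state(token) as root_state:
        # root_state is the top-level state; substates hang off root_state.substates.
        print(type(root_state).__name__)
```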