GeneralManager 0.14.1__py3-none-any.whl → 0.15.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. general_manager/__init__.py +49 -0
  2. general_manager/api/__init__.py +36 -0
  3. general_manager/api/graphql.py +92 -43
  4. general_manager/api/mutation.py +35 -10
  5. general_manager/api/property.py +26 -3
  6. general_manager/apps.py +23 -16
  7. general_manager/bucket/__init__.py +32 -0
  8. general_manager/bucket/baseBucket.py +76 -64
  9. general_manager/bucket/calculationBucket.py +188 -108
  10. general_manager/bucket/databaseBucket.py +130 -49
  11. general_manager/bucket/groupBucket.py +113 -60
  12. general_manager/cache/__init__.py +38 -0
  13. general_manager/cache/cacheDecorator.py +29 -17
  14. general_manager/cache/cacheTracker.py +34 -15
  15. general_manager/cache/dependencyIndex.py +117 -33
  16. general_manager/cache/modelDependencyCollector.py +17 -8
  17. general_manager/cache/signals.py +17 -6
  18. general_manager/factory/__init__.py +34 -5
  19. general_manager/factory/autoFactory.py +57 -60
  20. general_manager/factory/factories.py +39 -14
  21. general_manager/factory/factoryMethods.py +38 -1
  22. general_manager/interface/__init__.py +36 -0
  23. general_manager/interface/baseInterface.py +71 -27
  24. general_manager/interface/calculationInterface.py +18 -10
  25. general_manager/interface/databaseBasedInterface.py +102 -71
  26. general_manager/interface/databaseInterface.py +66 -20
  27. general_manager/interface/models.py +10 -4
  28. general_manager/interface/readOnlyInterface.py +44 -30
  29. general_manager/manager/__init__.py +36 -3
  30. general_manager/manager/generalManager.py +73 -47
  31. general_manager/manager/groupManager.py +72 -17
  32. general_manager/manager/input.py +23 -15
  33. general_manager/manager/meta.py +53 -53
  34. general_manager/measurement/__init__.py +37 -2
  35. general_manager/measurement/measurement.py +135 -58
  36. general_manager/measurement/measurementField.py +161 -61
  37. general_manager/permission/__init__.py +32 -1
  38. general_manager/permission/basePermission.py +29 -12
  39. general_manager/permission/managerBasedPermission.py +32 -26
  40. general_manager/permission/mutationPermission.py +32 -3
  41. general_manager/permission/permissionChecks.py +9 -1
  42. general_manager/permission/permissionDataManager.py +49 -15
  43. general_manager/permission/utils.py +14 -3
  44. general_manager/rule/__init__.py +27 -1
  45. general_manager/rule/handler.py +90 -5
  46. general_manager/rule/rule.py +40 -27
  47. general_manager/utils/__init__.py +44 -2
  48. general_manager/utils/argsToKwargs.py +17 -9
  49. general_manager/utils/filterParser.py +29 -30
  50. general_manager/utils/formatString.py +2 -0
  51. general_manager/utils/jsonEncoder.py +14 -1
  52. general_manager/utils/makeCacheKey.py +18 -12
  53. general_manager/utils/noneToZero.py +8 -6
  54. general_manager/utils/pathMapping.py +92 -29
  55. general_manager/utils/public_api.py +49 -0
  56. general_manager/utils/testing.py +135 -69
  57. {generalmanager-0.14.1.dist-info → generalmanager-0.15.1.dist-info}/METADATA +10 -2
  58. generalmanager-0.15.1.dist-info/RECORD +62 -0
  59. generalmanager-0.14.1.dist-info/RECORD +0 -58
  60. {generalmanager-0.14.1.dist-info → generalmanager-0.15.1.dist-info}/WHEEL +0 -0
  61. {generalmanager-0.14.1.dist-info → generalmanager-0.15.1.dist-info}/licenses/LICENSE +0 -0
  62. {generalmanager-0.14.1.dist-info → generalmanager-0.15.1.dist-info}/top_level.txt +0 -0
@@ -1,10 +1,11 @@
1
+ """Grouping bucket implementation for aggregating GeneralManager instances."""
2
+
1
3
  from __future__ import annotations
2
4
  from typing import (
3
5
  Type,
4
6
  Generator,
5
7
  Any,
6
8
  )
7
- import json
8
9
  from general_manager.manager.groupManager import GroupManager
9
10
  from general_manager.bucket.baseBucket import (
10
11
  Bucket,
@@ -13,37 +14,46 @@ from general_manager.bucket.baseBucket import (
13
14
 
14
15
 
15
16
  class GroupBucket(Bucket[GeneralManagerType]):
17
+ """Bucket variant that groups managers by specified attributes."""
16
18
 
17
19
  def __init__(
18
20
  self,
19
21
  manager_class: Type[GeneralManagerType],
20
22
  group_by_keys: tuple[str, ...],
21
23
  data: Bucket[GeneralManagerType],
22
- ):
24
+ ) -> None:
23
25
  """
24
- Initializes a GroupBucket by grouping data based on specified attribute keys.
26
+ Build a grouping bucket from the provided base data.
27
+
28
+ Parameters:
29
+ manager_class (type[GeneralManagerType]): GeneralManager subclass represented by the bucket.
30
+ group_by_keys (tuple[str, ...]): Attribute names used to define each group.
31
+ data (Bucket[GeneralManagerType]): Source bucket whose entries are grouped.
25
32
 
26
- Args:
27
- manager_class: The class type of the manager objects to be grouped.
28
- group_by_keys: Tuple of attribute names to group the data by.
29
- data: The underlying Bucket containing manager instances to be grouped.
33
+ Returns:
34
+ None
30
35
 
31
36
  Raises:
32
- TypeError: If any group-by key is not a string.
33
- ValueError: If any group-by key is not a valid attribute of the manager class.
37
+ TypeError: If a group-by key is not a string.
38
+ ValueError: If a group-by key is not a valid manager attribute.
34
39
  """
35
40
  super().__init__(manager_class)
36
41
  self.__checkGroupByArguments(group_by_keys)
37
42
  self._group_by_keys = group_by_keys
38
- self._data = self.__buildGroupedManager(data)
39
- self._basis_data = data
43
+ self._data: list[GroupManager[GeneralManagerType]] = self.__buildGroupedManager(
44
+ data
45
+ )
46
+ self._basis_data: Bucket[GeneralManagerType] = data
40
47
 
41
48
  def __eq__(self, other: object) -> bool:
42
49
  """
43
- Checks whether this GroupBucket is equal to another by comparing grouped data, manager class, and group-by keys.
50
+ Compare two grouping buckets for equality.
51
+
52
+ Parameters:
53
+ other (object): Object compared against the current bucket.
44
54
 
45
55
  Returns:
46
- True if both instances have identical grouped data, manager class, and group-by keys; otherwise, False.
56
+ bool: True when grouped data, manager class, and grouping keys match.
47
57
  """
48
58
  if not isinstance(other, self.__class__):
49
59
  return False
@@ -55,11 +65,17 @@ class GroupBucket(Bucket[GeneralManagerType]):
55
65
 
56
66
  def __checkGroupByArguments(self, group_by_keys: tuple[str, ...]) -> None:
57
67
  """
58
- Checks that each group-by key is a string and a valid attribute of the manager class.
68
+ Validate the supplied group-by keys.
69
+
70
+ Parameters:
71
+ group_by_keys (tuple[str, ...]): Attribute names requested for grouping.
72
+
73
+ Returns:
74
+ None
59
75
 
60
76
  Raises:
61
- TypeError: If any group-by key is not a string.
62
- ValueError: If any group-by key is not a valid attribute of the manager class.
77
+ TypeError: If a key is not a string.
78
+ ValueError: If a key is not an attribute exposed by the manager interface.
63
79
  """
64
80
  if not all(isinstance(arg, str) for arg in group_by_keys):
65
81
  raise TypeError("groupBy() arguments must be a strings")
@@ -76,20 +92,20 @@ class GroupBucket(Bucket[GeneralManagerType]):
76
92
  data: Bucket[GeneralManagerType],
77
93
  ) -> list[GroupManager[GeneralManagerType]]:
78
94
  """
79
- Constructs a list of GroupManager instances, each representing a unique group of entries from the provided data bucket based on the current group-by keys.
95
+ Construct grouped manager objects for every unique combination of key values.
80
96
 
81
- Args:
82
- data: The bucket of manager instances to be grouped.
97
+ Parameters:
98
+ data (Bucket[GeneralManagerType]): Source bucket that will be partitioned by the configured keys.
83
99
 
84
100
  Returns:
85
- A list of GroupManager objects, each corresponding to a unique combination of group-by attribute values found in the data.
101
+ list[GroupManager[GeneralManagerType]]: Group managers covering all key combinations.
86
102
  """
87
103
  group_by_values: set[tuple[tuple[str, Any], ...]] = set()
88
104
  for entry in data:
89
105
  key = tuple((arg, getattr(entry, arg)) for arg in self._group_by_keys)
90
106
  group_by_values.add(key)
91
107
 
92
- groups = []
108
+ groups: list[GroupManager[GeneralManagerType]] = []
93
109
  for group_by_value in sorted(group_by_values, key=str):
94
110
  group_by_dict = {key: value for key, value in group_by_value}
95
111
  grouped_manager_objects = data.filter(**group_by_dict)
@@ -102,10 +118,16 @@ class GroupBucket(Bucket[GeneralManagerType]):
102
118
 
103
119
  def __or__(self, other: object) -> GroupBucket[GeneralManagerType]:
104
120
  """
105
- Returns a new GroupBucket representing the union of this bucket and another, combining their underlying data.
121
+ Combine two grouping buckets produced from the same manager class.
122
+
123
+ Parameters:
124
+ other (object): Another grouping bucket to merge.
125
+
126
+ Returns:
127
+ GroupBucket[GeneralManagerType]: Bucket representing the union of both inputs.
106
128
 
107
129
  Raises:
108
- ValueError: If the other object is not a GroupBucket of the same type or uses a different manager class.
130
+ ValueError: If `other` is not a compatible GroupBucket instance.
109
131
  """
110
132
  if not isinstance(other, self.__class__):
111
133
  raise ValueError("Cannot combine different bucket types")
@@ -119,18 +141,22 @@ class GroupBucket(Bucket[GeneralManagerType]):
119
141
 
120
142
  def __iter__(self) -> Generator[GroupManager[GeneralManagerType], None, None]:
121
143
  """
122
- Yields each grouped manager in the current GroupBucket.
144
+ Iterate over the grouped managers produced by this bucket.
123
145
 
124
- Returns:
125
- A generator yielding GroupManager instances representing each group.
146
+ Yields:
147
+ GroupManager[GeneralManagerType]: Individual group manager instances.
126
148
  """
127
149
  yield from self._data
128
150
 
129
151
  def filter(self, **kwargs: Any) -> GroupBucket[GeneralManagerType]:
130
152
  """
131
- Returns a new GroupBucket containing only the entries from the underlying data that match the specified filter criteria.
153
+ Return a grouped bucket filtered by the provided lookups.
132
154
 
133
- Keyword arguments correspond to attribute-value pairs used for filtering.
155
+ Parameters:
156
+ **kwargs: Field lookups evaluated against the underlying bucket.
157
+
158
+ Returns:
159
+ GroupBucket[GeneralManagerType]: Grouped bucket containing only matching records.
134
160
  """
135
161
  new_basis_data = self._basis_data.filter(**kwargs)
136
162
  return GroupBucket(
@@ -141,9 +167,13 @@ class GroupBucket(Bucket[GeneralManagerType]):
141
167
 
142
168
  def exclude(self, **kwargs: Any) -> GroupBucket[GeneralManagerType]:
143
169
  """
144
- Returns a new GroupBucket excluding entries from the underlying data that match the given criteria.
170
+ Return a grouped bucket that excludes records matching the provided lookups.
145
171
 
146
- Keyword arguments specify attribute-value pairs to exclude from the basis data. The resulting GroupBucket retains the same grouping keys and manager class.
172
+ Parameters:
173
+ **kwargs: Field lookups whose matches should be removed from the underlying bucket.
174
+
175
+ Returns:
176
+ GroupBucket[GeneralManagerType]: Grouped bucket built from the filtered base data.
147
177
  """
148
178
  new_basis_data = self._basis_data.exclude(**kwargs)
149
179
  return GroupBucket(
@@ -154,7 +184,10 @@ class GroupBucket(Bucket[GeneralManagerType]):
154
184
 
155
185
  def first(self) -> GroupManager[GeneralManagerType] | None:
156
186
  """
157
- Returns the first grouped manager in the collection, or None if the collection is empty.
187
+ Return the first grouped manager in the collection.
188
+
189
+ Returns:
190
+ GroupManager[GeneralManagerType] | None: First group when available.
158
191
  """
159
192
  try:
160
193
  return next(iter(self))
@@ -163,7 +196,10 @@ class GroupBucket(Bucket[GeneralManagerType]):
163
196
 
164
197
  def last(self) -> GroupManager[GeneralManagerType] | None:
165
198
  """
166
- Returns the last grouped manager in the collection, or None if the collection is empty.
199
+ Return the last grouped manager in the collection.
200
+
201
+ Returns:
202
+ GroupManager[GeneralManagerType] | None: Last group when available.
167
203
  """
168
204
  items = list(self)
169
205
  if items:
@@ -172,30 +208,34 @@ class GroupBucket(Bucket[GeneralManagerType]):
172
208
 
173
209
  def count(self) -> int:
174
210
  """
175
- Returns the number of grouped managers in the bucket.
211
+ Count the number of grouped managers in the bucket.
212
+
213
+ Returns:
214
+ int: Number of groups.
176
215
  """
177
216
  return sum(1 for _ in self)
178
217
 
179
218
  def all(self) -> Bucket[GeneralManagerType]:
180
219
  """
181
- Returns the current GroupBucket instance.
220
+ Return the current grouping bucket.
182
221
 
183
- This method provides compatibility with interfaces expecting an `all()` method to retrieve the full collection.
222
+ Returns:
223
+ Bucket[GeneralManagerType]: This instance.
184
224
  """
185
225
  return self
186
226
 
187
227
  def get(self, **kwargs: Any) -> GroupManager[GeneralManagerType]:
188
228
  """
189
- Returns the first grouped manager matching the specified filter criteria.
229
+ Retrieve the first grouped manager matching the supplied filters.
190
230
 
191
- Args:
192
- **kwargs: Attribute-value pairs to filter grouped managers.
231
+ Parameters:
232
+ **kwargs: Field lookups applied to the grouped data.
193
233
 
194
234
  Returns:
195
- The first GroupManager instance matching the filter criteria.
235
+ GroupManager[GeneralManagerType]: Matching grouped manager.
196
236
 
197
237
  Raises:
198
- ValueError: If no grouped manager matches the provided criteria.
238
+ ValueError: If no grouped manager matches the filters.
199
239
  """
200
240
  first_value = self.filter(**kwargs).first()
201
241
  if first_value is None:
@@ -208,13 +248,18 @@ class GroupBucket(Bucket[GeneralManagerType]):
208
248
  self, item: int | slice
209
249
  ) -> GroupManager[GeneralManagerType] | GroupBucket[GeneralManagerType]:
210
250
  """
211
- Returns a grouped manager by index or a new GroupBucket by slice.
251
+ Access a specific group or a slice of groups.
252
+
253
+ Parameters:
254
+ item (int | slice): Index or slice describing the desired groups.
212
255
 
213
- If an integer index is provided, returns the corresponding GroupManager. If a slice is provided, returns a new GroupBucket containing the union of the basis data from the selected groups.
256
+ Returns:
257
+ GroupManager[GeneralManagerType] | GroupBucket[GeneralManagerType]:
258
+ Group at the specified index or a new bucket built from the selected groups.
214
259
 
215
260
  Raises:
216
- ValueError: If slicing results in no groups.
217
- TypeError: If the argument is not an int or slice.
261
+ ValueError: If the requested slice contains no groups.
262
+ TypeError: If the argument is not an integer or slice.
218
263
  """
219
264
  if isinstance(item, int):
220
265
  return self._data[item]
@@ -233,19 +278,22 @@ class GroupBucket(Bucket[GeneralManagerType]):
233
278
 
234
279
  def __len__(self) -> int:
235
280
  """
236
- Returns the number of grouped managers in the GroupBucket.
281
+ Return the number of grouped managers.
282
+
283
+ Returns:
284
+ int: Number of groups.
237
285
  """
238
286
  return self.count()
239
287
 
240
288
  def __contains__(self, item: GeneralManagerType) -> bool:
241
289
  """
242
- Checks if the given manager instance is present in the underlying basis data.
290
+ Determine whether the given manager instance exists in the underlying data.
243
291
 
244
- Args:
245
- item: The manager instance to check for membership.
292
+ Parameters:
293
+ item (GeneralManagerType): Manager instance checked for membership.
246
294
 
247
295
  Returns:
248
- True if the item exists in the basis data; otherwise, False.
296
+ bool: True if the instance is present in the basis data.
249
297
  """
250
298
  return item in self._basis_data
251
299
 
@@ -255,14 +303,14 @@ class GroupBucket(Bucket[GeneralManagerType]):
255
303
  reverse: bool = False,
256
304
  ) -> Bucket[GeneralManagerType]:
257
305
  """
258
- Returns a new GroupBucket with grouped managers sorted by the specified attribute keys.
306
+ Return a new GroupBucket sorted by the specified attributes.
259
307
 
260
- Args:
261
- key: A string or tuple of strings specifying the attribute(s) to sort by.
262
- reverse: If True, sorts in descending order. Defaults to False.
308
+ Parameters:
309
+ key (str | tuple[str, ...]): Attribute name(s) used for sorting.
310
+ reverse (bool): Whether to apply descending order.
263
311
 
264
312
  Returns:
265
- A new GroupBucket instance with grouped managers sorted by the given keys.
313
+ Bucket[GeneralManagerType]: Sorted grouping bucket.
266
314
  """
267
315
  if isinstance(key, str):
268
316
  key = (key,)
@@ -285,9 +333,13 @@ class GroupBucket(Bucket[GeneralManagerType]):
285
333
 
286
334
  def group_by(self, *group_by_keys: str) -> GroupBucket[GeneralManagerType]:
287
335
  """
288
- Return a new GroupBucket grouped by the current and additional attribute keys.
289
-
290
- Additional group-by keys are appended to the existing grouping, and the new GroupBucket is constructed from the same underlying data.
336
+ Extend the grouping with additional attribute keys.
337
+
338
+ Parameters:
339
+ *group_by_keys (str): Attribute names appended to the current grouping.
340
+
341
+ Returns:
342
+ GroupBucket[GeneralManagerType]: New bucket grouped by the combined key set.
291
343
  """
292
344
  return GroupBucket(
293
345
  self._manager_class,
@@ -297,9 +349,10 @@ class GroupBucket(Bucket[GeneralManagerType]):
297
349
 
298
350
  def none(self) -> GroupBucket[GeneralManagerType]:
299
351
  """
300
- Return a new empty GroupBucket with the same manager class and group-by keys as the current instance.
301
-
302
- This method creates a GroupBucket containing no items, preserving the grouping configuration of the original.
352
+ Produce an empty grouping bucket that preserves the current configuration.
353
+
354
+ Returns:
355
+ GroupBucket[GeneralManagerType]: Empty grouping bucket with identical manager class and grouping keys.
303
356
  """
304
357
  return GroupBucket(
305
358
  self._manager_class, self._group_by_keys, self._basis_data.none()
@@ -0,0 +1,38 @@
1
+ """Caching helpers for GeneralManager dependencies."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Any
6
+
7
+ from general_manager.utils.public_api import build_module_dir, resolve_export
8
+
9
+ __all__ = [
10
+ "cached",
11
+ "CacheBackend",
12
+ "DependencyTracker",
13
+ "record_dependencies",
14
+ "remove_cache_key_from_index",
15
+ "invalidate_cache_key",
16
+ ]
17
+
18
+ _MODULE_MAP = {
19
+ "cached": ("general_manager.cache.cacheDecorator", "cached"),
20
+ "CacheBackend": ("general_manager.cache.cacheDecorator", "CacheBackend"),
21
+ "DependencyTracker": ("general_manager.cache.cacheTracker", "DependencyTracker"),
22
+ "record_dependencies": ("general_manager.cache.dependencyIndex", "record_dependencies"),
23
+ "remove_cache_key_from_index": ("general_manager.cache.dependencyIndex", "remove_cache_key_from_index"),
24
+ "invalidate_cache_key": ("general_manager.cache.dependencyIndex", "invalidate_cache_key"),
25
+ }
26
+
27
+
28
+ def __getattr__(name: str) -> Any:
29
+ return resolve_export(
30
+ name,
31
+ module_all=__all__,
32
+ module_map=_MODULE_MAP,
33
+ module_globals=globals(),
34
+ )
35
+
36
+
37
+ def __dir__() -> list[str]:
38
+ return build_module_dir(module_all=__all__, module_globals=globals())
@@ -1,4 +1,6 @@
1
- from typing import Any, Callable, Optional, Protocol, Set
1
+ """Helpers for caching GeneralManager computations with dependency tracking."""
2
+
3
+ from typing import Any, Callable, Optional, Protocol, Set, TypeVar, cast
2
4
  from functools import wraps
3
5
  from django.core.cache import cache as django_cache
4
6
  from general_manager.cache.cacheTracker import DependencyTracker
@@ -10,30 +12,34 @@ from general_manager.utils.makeCacheKey import make_cache_key
10
12
  class CacheBackend(Protocol):
11
13
  def get(self, key: str, default: Optional[Any] = None) -> Any:
12
14
  """
13
- Retrieves a value from the cache by key, returning a default if the key is not found.
15
+ Retrieve a value from the cache, falling back to a default.
14
16
 
15
- Args:
16
- key: The cache key to look up.
17
- default: Value to return if the key is not present in the cache.
17
+ Parameters:
18
+ key (str): Cache key identifying the stored entry.
19
+ default (Any | None): Value returned when the key is absent.
18
20
 
19
21
  Returns:
20
- The cached value if found; otherwise, the provided default.
22
+ Any: Cached value when available; otherwise, `default`.
21
23
  """
22
24
  ...
23
25
 
24
26
  def set(self, key: str, value: Any, timeout: Optional[int] = None) -> None:
25
27
  """
26
- Stores a value in the cache under the specified key with an optional expiration timeout.
28
+ Store a value in the cache with an optional expiration timeout.
29
+
30
+ Parameters:
31
+ key (str): Cache key identifying the stored entry.
32
+ value (Any): Object written to the cache.
33
+ timeout (int | None): Expiration in seconds; `None` stores the value indefinitely.
27
34
 
28
- Args:
29
- key: The cache key to associate with the value.
30
- value: The value to store in the cache.
31
- timeout: Optional expiration time in seconds. If None, the value is cached indefinitely.
35
+ Returns:
36
+ None
32
37
  """
33
38
  ...
34
39
 
35
40
 
36
41
  RecordFn = Callable[[str, Set[Dependency]], None]
42
+ FuncT = TypeVar("FuncT", bound=Callable[..., object])
37
43
 
38
44
  _SENTINEL = object()
39
45
 
@@ -42,16 +48,22 @@ def cached(
42
48
  timeout: Optional[int] = None,
43
49
  cache_backend: CacheBackend = django_cache,
44
50
  record_fn: RecordFn = record_dependencies,
45
- ) -> Callable:
51
+ ) -> Callable[[FuncT], FuncT]:
46
52
  """
47
- Decorator that caches function results and tracks their dependencies.
53
+ Cache a function call while registering its data dependencies.
54
+
55
+ Parameters:
56
+ timeout (int | None): Expiration in seconds for cached values; `None` stores results until invalidated.
57
+ cache_backend (CacheBackend): Backend used to read and write cached results.
58
+ record_fn (RecordFn): Callback invoked to persist dependency metadata when no timeout is defined.
48
59
 
49
- When applied to a function, this decorator caches the function's output using a generated cache key based on its arguments. It also tracks dependencies accessed during the function's execution and stores them alongside the cached result. On cache hits, previously stored dependencies are re-tracked to maintain dependency tracking continuity. If dependencies exist and no timeout is set, an external recording function is invoked to persist the dependency information.
60
+ Returns:
61
+ Callable: Decorator that wraps the target function with caching behaviour.
50
62
  """
51
63
 
52
- def decorator(func: Callable) -> Callable:
64
+ def decorator(func: FuncT) -> FuncT:
53
65
  @wraps(func)
54
- def wrapper(*args, **kwargs):
66
+ def wrapper(*args: object, **kwargs: object) -> object:
55
67
  key = make_cache_key(func, args, kwargs)
56
68
  deps_key = f"{key}:deps"
57
69
 
@@ -76,6 +88,6 @@ def cached(
76
88
 
77
89
  return result
78
90
 
79
- return wrapper
91
+ return cast(FuncT, wrapper)
80
92
 
81
93
  return decorator
@@ -1,11 +1,15 @@
1
+ """Context manager utilities for tracking cache dependencies per thread."""
2
+
1
3
  import threading
4
+ from types import TracebackType
5
+
2
6
  from general_manager.cache.dependencyIndex import (
3
- general_manager_name,
4
7
  Dependency,
5
8
  filter_type,
9
+ general_manager_name,
6
10
  )
7
11
 
8
- # Thread-lokale Variable zur Speicherung der Abhängigkeiten
12
+ # Thread-local storage for tracking dependencies
9
13
  _dependency_storage = threading.local()
10
14
 
11
15
 
@@ -14,12 +18,10 @@ class DependencyTracker:
14
18
  self,
15
19
  ) -> set[Dependency]:
16
20
  """
17
- Enters a new dependency tracking context and returns the set for collecting dependencies.
18
-
19
- Initializes thread-local storage for dependency tracking if not already present, supports nested contexts, and provides a set to accumulate dependencies at the current nesting level.
21
+ Enter a dependency tracking context and return the collector set.
20
22
 
21
23
  Returns:
22
- The set used to collect dependencies for the current context level.
24
+ set[Dependency]: Mutable set capturing dependencies discovered inside the context.
23
25
  """
24
26
  if not hasattr(_dependency_storage, "dependencies"):
25
27
  _dependency_storage._depth = 0
@@ -29,18 +31,29 @@ class DependencyTracker:
29
31
  _dependency_storage.dependencies.append(set())
30
32
  return _dependency_storage.dependencies[_dependency_storage._depth]
31
33
 
32
- def __exit__(self, exc_type, exc_val, exc_tb):
34
+ def __exit__(
35
+ self,
36
+ exc_type: type[BaseException] | None,
37
+ exc_val: BaseException | None,
38
+ exc_tb: TracebackType | None,
39
+ ) -> None:
33
40
  """
34
- Exits the dependency tracking context, managing cleanup for nested scopes.
41
+ Leave the dependency tracking context and perform nested-scope cleanup.
42
+
43
+ Parameters:
44
+ exc_type: Exception type raised within the context, if any.
45
+ exc_val: Exception instance raised within the context, if any.
46
+ exc_tb: Traceback generated by the exception, if any.
35
47
 
36
- If exiting the outermost context, removes all dependency tracking data from thread-local storage. Otherwise, decrements the nesting depth and removes the most recent dependency set.
48
+ Returns:
49
+ None
37
50
  """
38
51
  if hasattr(_dependency_storage, "dependencies"):
39
52
  if _dependency_storage._depth == 0:
40
53
  self.reset_thread_local_storage()
41
54
 
42
55
  else:
43
- # Ansonsten reduzieren wir nur die Tiefe
56
+ # For nested contexts only reduce depth and pop one level.
44
57
  _dependency_storage._depth -= 1
45
58
  _dependency_storage.dependencies.pop()
46
59
 
@@ -51,23 +64,29 @@ class DependencyTracker:
51
64
  identifier: str,
52
65
  ) -> None:
53
66
  """
54
- Records a dependency in all active dependency tracking contexts.
67
+ Record a dependency tuple in the active tracking scopes.
55
68
 
56
- Adds the specified dependency tuple to each set in the current stack of dependency tracking scopes, ensuring it is tracked at all nested levels.
69
+ Parameters:
70
+ class_name (str): Name of the GeneralManager subclass.
71
+ operation (filter_type): Operation being tracked, such as `filter` or `exclude`.
72
+ identifier (str): String representation of the lookup parameters.
73
+
74
+ Returns:
75
+ None
57
76
  """
58
77
  if hasattr(_dependency_storage, "dependencies"):
59
78
  for dep_set in _dependency_storage.dependencies[
60
79
  : _dependency_storage._depth + 1
61
80
  ]:
62
- dep_set: set[Dependency]
63
81
  dep_set.add((class_name, operation, identifier))
64
82
 
65
83
  @staticmethod
66
84
  def reset_thread_local_storage() -> None:
67
85
  """
68
- Resets the thread-local storage for dependency tracking.
86
+ Clear all dependency tracking data from thread-local storage.
69
87
 
70
- This method clears the thread-local storage, ensuring that all dependency tracking data is removed. It is useful for cleaning up after operations that may have modified the state of the tracker.
88
+ Returns:
89
+ None
71
90
  """
72
91
  if hasattr(_dependency_storage, "dependencies"):
73
92
  del _dependency_storage.dependencies