django-fast-treenode 2.1.4__py3-none-any.whl → 3.0.1__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to their public registry. It is provided for informational purposes only and reflects the changes between those package versions.
- django_fast_treenode-3.0.1.dist-info/METADATA +203 -0
- django_fast_treenode-3.0.1.dist-info/RECORD +90 -0
- {django_fast_treenode-2.1.4.dist-info → django_fast_treenode-3.0.1.dist-info}/WHEEL +1 -1
- treenode/admin/__init__.py +2 -7
- treenode/admin/admin.py +138 -209
- treenode/admin/changelist.py +21 -39
- treenode/admin/exporter.py +170 -0
- treenode/admin/importer.py +171 -0
- treenode/admin/mixin.py +291 -0
- treenode/apps.py +41 -19
- treenode/cache.py +192 -303
- treenode/forms.py +45 -65
- treenode/managers/__init__.py +4 -20
- treenode/managers/managers.py +216 -0
- treenode/managers/queries.py +233 -0
- treenode/managers/tasks.py +167 -0
- treenode/models/__init__.py +8 -5
- treenode/models/decorators.py +54 -0
- treenode/models/factory.py +44 -68
- treenode/models/mixins/__init__.py +2 -1
- treenode/models/mixins/ancestors.py +44 -20
- treenode/models/mixins/children.py +33 -26
- treenode/models/mixins/descendants.py +33 -22
- treenode/models/mixins/family.py +25 -15
- treenode/models/mixins/logical.py +23 -21
- treenode/models/mixins/node.py +162 -104
- treenode/models/mixins/properties.py +22 -16
- treenode/models/mixins/roots.py +59 -15
- treenode/models/mixins/siblings.py +46 -43
- treenode/models/mixins/tree.py +212 -153
- treenode/models/mixins/update.py +154 -0
- treenode/models/models.py +365 -0
- treenode/settings.py +28 -0
- treenode/static/{treenode/css → css}/tree_widget.css +1 -1
- treenode/static/{treenode/css → css}/treenode_admin.css +43 -2
- treenode/static/css/treenode_tabs.css +51 -0
- treenode/static/js/lz-string.min.js +1 -0
- treenode/static/{treenode/js → js}/tree_widget.js +9 -23
- treenode/static/js/treenode_admin.js +531 -0
- treenode/static/vendors/jquery-ui/AUTHORS.txt +384 -0
- treenode/static/vendors/jquery-ui/LICENSE.txt +43 -0
- treenode/static/vendors/jquery-ui/external/jquery/jquery.js +10716 -0
- treenode/static/vendors/jquery-ui/images/ui-icons_444444_256x240.png +0 -0
- treenode/static/vendors/jquery-ui/images/ui-icons_555555_256x240.png +0 -0
- treenode/static/vendors/jquery-ui/images/ui-icons_777620_256x240.png +0 -0
- treenode/static/vendors/jquery-ui/images/ui-icons_777777_256x240.png +0 -0
- treenode/static/vendors/jquery-ui/images/ui-icons_cc0000_256x240.png +0 -0
- treenode/static/vendors/jquery-ui/images/ui-icons_ffffff_256x240.png +0 -0
- treenode/static/vendors/jquery-ui/index.html +297 -0
- treenode/static/vendors/jquery-ui/jquery-ui.css +438 -0
- treenode/static/vendors/jquery-ui/jquery-ui.js +5223 -0
- treenode/static/vendors/jquery-ui/jquery-ui.min.css +7 -0
- treenode/static/vendors/jquery-ui/jquery-ui.min.js +6 -0
- treenode/static/vendors/jquery-ui/jquery-ui.structure.css +16 -0
- treenode/static/vendors/jquery-ui/jquery-ui.structure.min.css +5 -0
- treenode/static/vendors/jquery-ui/jquery-ui.theme.css +439 -0
- treenode/static/vendors/jquery-ui/jquery-ui.theme.min.css +5 -0
- treenode/static/vendors/jquery-ui/package.json +82 -0
- treenode/templates/admin/treenode_changelist.html +25 -0
- treenode/templates/admin/treenode_import_export.html +85 -0
- treenode/templates/admin/treenode_rows.html +57 -0
- treenode/tests.py +3 -0
- treenode/urls.py +6 -27
- treenode/utils/__init__.py +0 -15
- treenode/utils/db/__init__.py +7 -0
- treenode/utils/db/compiler.py +114 -0
- treenode/utils/db/db_vendor.py +50 -0
- treenode/utils/db/service.py +84 -0
- treenode/utils/db/sqlcompat.py +60 -0
- treenode/utils/db/sqlquery.py +70 -0
- treenode/version.py +2 -2
- treenode/views/__init__.py +5 -0
- treenode/views/autoapi.py +91 -0
- treenode/views/autocomplete.py +52 -0
- treenode/views/children.py +41 -0
- treenode/views/common.py +23 -0
- treenode/views/crud.py +209 -0
- treenode/views/search.py +48 -0
- treenode/widgets.py +27 -44
- django_fast_treenode-2.1.4.dist-info/METADATA +0 -166
- django_fast_treenode-2.1.4.dist-info/RECORD +0 -63
- treenode/admin/mixins.py +0 -302
- treenode/managers/adjacency.py +0 -205
- treenode/managers/closure.py +0 -278
- treenode/models/adjacency.py +0 -342
- treenode/models/classproperty.py +0 -27
- treenode/models/closure.py +0 -122
- treenode/static/treenode/js/.gitkeep +0 -1
- treenode/static/treenode/js/treenode_admin.js +0 -131
- treenode/templates/admin/export_success.html +0 -26
- treenode/templates/admin/tree_node_changelist.html +0 -19
- treenode/templates/admin/tree_node_export.html +0 -27
- treenode/templates/admin/tree_node_import.html +0 -45
- treenode/templates/admin/tree_node_import_report.html +0 -32
- treenode/templates/widgets/tree_widget.css +0 -23
- treenode/utils/aid.py +0 -46
- treenode/utils/base16.py +0 -38
- treenode/utils/base36.py +0 -37
- treenode/utils/db.py +0 -116
- treenode/utils/exporter.py +0 -196
- treenode/utils/importer.py +0 -328
- treenode/utils/radix.py +0 -61
- treenode/views.py +0 -184
- {django_fast_treenode-2.1.4.dist-info → django_fast_treenode-3.0.1.dist-info/licenses}/LICENSE +0 -0
- {django_fast_treenode-2.1.4.dist-info → django_fast_treenode-3.0.1.dist-info}/top_level.txt +0 -0
- /treenode/static/{treenode → css}/.gitkeep +0 -0
- /treenode/static/{treenode/css → js}/.gitkeep +0 -0
treenode/apps.py
CHANGED
@@ -1,34 +1,56 @@
New file contents in 3.0.1:

"""
TreeNode configuration definition module.

Customization:
- checks the correctness of the sorting fields
- checks the correctness of model inheritance
- starts asynchronous loading of node data into the cache

Version: 3.0.0
Author: Timur Kady
Email: timurkady@yandex.com
"""

from django.apps import apps, AppConfig


class TreeNodeConfig(AppConfig):
    """Config Class."""

    default_auto_field = "django.db.models.BigAutoField"
    name = "treenode"

    def ready(self):
        """Ready method."""
        from .models import TreeNodeModel

        # Models checking
        subclasses = [
            m for m in apps.get_models()
            if issubclass(m, TreeNodeModel) and m is not TreeNodeModel
        ]

        for model in subclasses:

            field_names = {f.name for f in model._meta.get_fields()}

            # Check display_field is correct
            if model.display_field is not None:
                if model.display_field not in field_names:
                    raise ValueError(
                        f'Invalid display_field "{model.display_field}. "'
                        f'Available fields: {field_names}')

            # Check sorting_field is correct
            if model.sorting_field is not None:
                if model.sorting_field not in field_names:
                    raise ValueError(
                        f'Invalid sorting_field "{model.sorting_field}. "'
                        f'Available fields: {field_names}')

            # Check if Meta is a descendant of TreeNodeModel.Meta
            if not issubclass(model.Meta, TreeNodeModel.Meta):
                raise ValueError(
                    f'{model.__name__} must inherit Meta class ' +
                    'from TreeNodeModel.Meta.'
                )
treenode/cache.py
CHANGED
@@ -1,352 +1,241 @@
New file contents in 3.0.1:

# -*- coding: utf-8 -*-
"""
TreeCache: High-performance asynchronous in-memory cache with memory size limits

Description:
- FIFO-based cache eviction controlled by total memory footprint (in bytes)
- Background thread performs serialization and memory tracking
- Supports prefix-based invalidation and full cache reset
- Fast and flexible, built for caching arbitrary Python objects

Usage:
- Call `set(key, value)` to queue data for caching
- Background worker will serialize and insert it
- Call `get(key)` to retrieve and deserialize cached values
- Use `invalidate(prefix)` to remove all keys with the given prefix
- Use `clear()` to fully reset the cache
- Don't forget to call `start_worker()` on initialization, and `stop_worker()`
  on shutdown

Dependencies:
- cloudpickle (faster and more flexible than standard pickle)

Version: 3.0.0
Author: Timur Kady
Email: timurkady@yandex.com
"""

import sys
import msgpack
import functools
import hashlib
import threading
import time
from collections import deque, defaultdict
from typing import Any, Callable

from .settings import CACHE_LIMIT


class TreeCache:
    """Tree Cache Class."""

    def __init__(self):
        """Initialize TreeCache with background worker and memory limit."""
        self.max_size = CACHE_LIMIT

        self.cache = {}  # key -> serialized value
        self.order = deque()  # FIFO order tracking
        self.queue_index = {}
        self.sizes = {}  # key -> size in bytes
        self.total_size = 0  # total size in bytes
        self.prefix_index = defaultdict(set)  # prefix -> keys
        self.key_prefix = {}  # key -> prefix

        self.queue = deque()  # write queue (key, value)
        self.queue_lock = threading.Lock()

        self.stop_event = threading.Event()
        self.worker = threading.Thread(target=self._worker_loop, daemon=True)
        self.start_worker()

    def start_worker(self):
        """Start the background worker thread."""
        if not self.worker.is_alive():
            self.worker.start()

    def stop_worker(self):
        """Stop the background worker thread."""
        self.stop_event.set()
        self.worker.join()

    def _estimate_size(self, value):
        """
        Determine the size of the object in bytes.

        If the value is already in bytes or bytearray, simply returns its
        length. Otherwise, uses sys.getsizeof for an approximate estimate.
        """
        try:
            return int(len(msgpack.packb(value)) * 2.5)
        except Exception:
            return sys.getsizeof(value)

    def _worker_loop(self):
        """
        Loop Worker.

        Background worker that processes the cache queue,
        serializes values, and enforces memory constraints.
        """
        while not self.stop_event.is_set():
            with self.queue_lock:
                if self.queue:
                    key, value = self.queue.popleft()
                else:
                    key = value = None

            if key is not None:
                obj_size = self._estimate_size(value)
                self.cache[key] = value
                self.order.append(key)
                self.sizes[key] = obj_size
                self.total_size += obj_size

                prefix = key.split("|", 1)[0] + "|"
                self.key_prefix[key] = prefix
                self.prefix_index[prefix].add(key)

                if self.total_size > self.max_size:
                    self._evict_cache()
            else:
                time.sleep(0.0025)

    def set(self, key: str, value: Any):
        """Queue a key-value pair for caching. Actual insertion is async."""
        with self.queue_lock:
            self.queue.append((key, value))
            self.queue_index[key] = value

    def get(self, key: str) -> Any:
        """
        Get data from the cache.

        Retrieve a value from the cache and deserialize it.
        Returns None if key is not present or deserialization fails.
        """
        # Step 1. Try get value from the cache
        from_cache = self.cache.get(key)
        if from_cache is not None:
            return from_cache

        # Step 2. Search in pending queue
        return self.queue_index.get(key)

    def _evict_cache(self):
        """Remove oldest entries from the cache and auxiliary index."""
        while self.total_size > self.max_size and self.order:
            oldest = self.order.popleft()
            self.total_size -= self.sizes.pop(oldest, 0)
            self.cache.pop(oldest, None)
            prefix = self.key_prefix.pop(oldest, None)
            if prefix:
                self.prefix_index[prefix].discard(oldest)
            if hasattr(self, "queue_index"):
                self.queue_index.pop(oldest, None)

    def invalidate(self, prefix: str):
        """
        Invalidate all keys with the given prefix (e.g. "node_").

        Also purges pending items in the queue with the same prefix.
        """
        prefix = f"{prefix}|"
        keys_to_remove = self.prefix_index.pop(prefix, set())
        for key in keys_to_remove:
            self.total_size -= self.sizes.pop(key, 0)
            self.cache.pop(key, None)
            self.key_prefix.pop(key, None)
            try:
                self.order.remove(key)
            except ValueError:
                pass
        with self.queue_lock:
            self.queue = deque(
                [(k, v) for k, v in self.queue if not k.startswith(prefix)])

    def clear(self):
        """Fully reset the cache, indexes, and the background queue."""
        self.cache.clear()
        self.order.clear()
        self.sizes.clear()
        self.total_size = 0
        self.prefix_index.clear()
        self.key_prefix.clear()
        with self.queue_lock:
            self.queue.clear()

    def info(self) -> dict:
        """Return runtime statistics for monitoring and diagnostics."""
        with self.queue_lock:
            queued = len(self.queue)

        return {
            "total_keys": len(self.cache),
            "queued_items": queued,
            "total_size": int(10*self.total_size/(1024*1024))/10,
            "max_size": int(10*self.max_size/(1024*1024))/10,
            "fill_percent": round(self.total_size / self.max_size * 100, 2) if self.max_size else 0.0,  # noqa: D501
            "prefixes": len(self.prefix_index),
            "running": not self.stop_event.is_set(),
            "thread_alive": self.worker.is_alive()
        }

    def generate_cache_key(self, label: str, func_name: str, unique_id: int,
                           args: tuple, kwargs: dict) -> str:
        """
        Generate a unique cache key for a function call.

        - Fast-path: for simple positional arguments, avoid serialization.
        - Full-path: use pickle+blake2b hash for complex inputs.
        """
        if not args and not kwargs:
            return f"{label}|{func_name}:{unique_id}:empty"

        try:
            key_data = (args, kwargs)
            packed = msgpack.packb(key_data)
            key = hashlib.blake2b(packed, digest_size=8).hexdigest()
            return f"{label}|{func_name}:{unique_id}:{key}"
        except Exception:
            fallback = repr((args, kwargs)).encode()
            key = hashlib.sha1(fallback).hexdigest()
            return f"{label}|{func_name}:{unique_id}:{key}"


# Global singleton cache instance
treenode_cache = TreeCache()


def cached_method(func: Callable) -> Callable:
    """
    Decorate method.

    Method decorator that caches results on a per-instance basis using
    TreeCache. The cache key includes the method, arguments, and instance ID.
    """
    cache = treenode_cache

    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        label = self._meta.label
        unique_id = getattr(self, "pk", None) or id(self)
        func_name = func.__name__
        key = cache.generate_cache_key(
            label, func_name, unique_id, args, kwargs)
        result = cache.get(key)
        if result is None:
            result = func(self, *args, **kwargs)
            cache.set(key, result)
        return result
    return wrapper
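
As a usage sketch, reusing the hypothetical Category model from the apps.py example above (the method name and the "myapp" app label are illustrative), the per-instance decorator and the module-level singleton could be exercised like this:

from treenode.cache import treenode_cache, cached_method
from treenode.models import TreeNodeModel


class Category(TreeNodeModel):
    """Hypothetical TreeNodeModel subclass."""

    @cached_method
    def expensive_summary(self, depth=3):
        # Computed once per (instance, arguments); repeated calls with the
        # same arguments are served from TreeCache until eviction.
        return {"pk": self.pk, "depth": depth}


# After bulk changes to Category rows, drop every cached entry whose key
# starts with the model label used by generate_cache_key(), e.g.:
treenode_cache.invalidate("myapp.Category")

# Runtime diagnostics; sizes are reported in megabytes:
print(treenode_cache.info())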