django-fast-treenode 2.0.11__py3-none-any.whl → 2.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {django_fast_treenode-2.0.11.dist-info → django_fast_treenode-2.1.1.dist-info}/LICENSE +2 -2
- django_fast_treenode-2.1.1.dist-info/METADATA +158 -0
- django_fast_treenode-2.1.1.dist-info/RECORD +64 -0
- {django_fast_treenode-2.0.11.dist-info → django_fast_treenode-2.1.1.dist-info}/WHEEL +1 -1
- treenode/admin/__init__.py +9 -0
- treenode/admin/admin.py +295 -0
- treenode/admin/changelist.py +65 -0
- treenode/admin/mixins.py +302 -0
- treenode/apps.py +12 -1
- treenode/cache.py +2 -2
- treenode/forms.py +8 -10
- treenode/managers/__init__.py +21 -0
- treenode/managers/adjacency.py +203 -0
- treenode/managers/closure.py +278 -0
- treenode/models/__init__.py +2 -1
- treenode/models/adjacency.py +343 -0
- treenode/models/classproperty.py +3 -0
- treenode/models/closure.py +23 -24
- treenode/models/factory.py +12 -2
- treenode/models/mixins/__init__.py +23 -0
- treenode/models/mixins/ancestors.py +65 -0
- treenode/models/mixins/children.py +81 -0
- treenode/models/mixins/descendants.py +66 -0
- treenode/models/mixins/family.py +63 -0
- treenode/models/mixins/logical.py +68 -0
- treenode/models/mixins/node.py +210 -0
- treenode/models/mixins/properties.py +156 -0
- treenode/models/mixins/roots.py +96 -0
- treenode/models/mixins/siblings.py +99 -0
- treenode/models/mixins/tree.py +344 -0
- treenode/signals.py +26 -0
- treenode/static/treenode/css/tree_widget.css +201 -31
- treenode/static/treenode/css/treenode_admin.css +48 -41
- treenode/static/treenode/js/tree_widget.js +269 -131
- treenode/static/treenode/js/treenode_admin.js +131 -171
- treenode/templates/admin/tree_node_changelist.html +6 -0
- treenode/templates/admin/treenode_ajax_rows.html +7 -0
- treenode/tests/tests.py +488 -0
- treenode/urls.py +10 -6
- treenode/utils/__init__.py +2 -0
- treenode/utils/aid.py +46 -0
- treenode/utils/base16.py +38 -0
- treenode/utils/base36.py +3 -1
- treenode/utils/db.py +116 -0
- treenode/utils/exporter.py +2 -0
- treenode/utils/importer.py +0 -1
- treenode/utils/radix.py +61 -0
- treenode/version.py +2 -2
- treenode/views.py +118 -43
- treenode/widgets.py +91 -43
- django_fast_treenode-2.0.11.dist-info/METADATA +0 -698
- django_fast_treenode-2.0.11.dist-info/RECORD +0 -42
- treenode/admin.py +0 -439
- treenode/docs/Documentation +0 -636
- treenode/managers.py +0 -419
- treenode/models/proxy.py +0 -669
- {django_fast_treenode-2.0.11.dist-info → django_fast_treenode-2.1.1.dist-info}/top_level.txt +0 -0
treenode/managers/closure.py
ADDED
@@ -0,0 +1,278 @@
+# -*- coding: utf-8 -*-
+"""
+Closure Table Manager and QuerySet
+
+This module defines custom managers and query sets for the ClosureModel.
+It includes optimized bulk operations for handling hierarchical data
+using the Closure Table approach.
+
+Version: 2.1.0
+Author: Timur Kady
+Email: timurkady@yandex.com
+"""
+
+from collections import deque, defaultdict
+from django.db import models, transaction
+
+
+# ----------------------------------------------------------------------------
+# Closure Model
+# ----------------------------------------------------------------------------
+
+
+class ClosureQuerySet(models.QuerySet):
+    """QuerySet for ClosureModel."""
+
+    def sort_nodes(self, node_list):
+        """
+        Sort nodes topologically.
+
+        Returns a list of nodes sorted from roots to leaves.
+        A node is considered a root if its tn_parent is None or its
+        parent is not in node_list.
+        """
+        visited = set()  # Will store the ids of already processed nodes
+        result = []
+        # Set of node ids included in the original list
+        node_ids = {node.id for node in node_list}
+
+        def dfs(node):
+            if node.id in visited:
+                return
+            # If there is a parent and it is included in node_list, then
+            # process it first
+            if node.tn_parent and node.tn_parent_id in node_ids:
+                dfs(node.tn_parent)
+            visited.add(node.id)
+            result.append(node)
+
+        for n in node_list:
+            dfs(n)
+
+        return result
+
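
The parent-before-child guarantee of sort_nodes() is what lets the bulk methods below derive each node's closure rows from rows that already exist for its parent. A standalone sketch of the same ordering rule, using a small stand-in class instead of a Django model (Node here is hypothetical and only mirrors the tn_parent / tn_parent_id attributes the method relies on):

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class Node:
        id: int
        tn_parent: Optional["Node"] = None

        @property
        def tn_parent_id(self):
            return self.tn_parent.id if self.tn_parent else None

    def sort_nodes(node_list):
        visited, result = set(), []
        node_ids = {n.id for n in node_list}

        def dfs(node):
            if node.id in visited:
                return
            if node.tn_parent and node.tn_parent_id in node_ids:
                dfs(node.tn_parent)          # process the parent first
            visited.add(node.id)
            result.append(node)

        for n in node_list:
            dfs(n)
        return result

    root = Node(1)
    child = Node(2, tn_parent=root)
    leaf = Node(3, tn_parent=child)
    assert [n.id for n in sort_nodes([leaf, root, child])] == [1, 2, 3]
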
+    @transaction.atomic
+    def bulk_create(self, objs, batch_size=1000, *args, **kwargs):
+        """Insert new nodes in bulk."""
+        result = []
+
+        # 1. Topological sorting of nodes.
+        objs = self.sort_nodes(objs)
+
+        # 2. Create self-links for all nodes: (node, node, 0, node).
+        self_links = [
+            self.model(parent=obj, child=obj, depth=0, node=obj)
+            for obj in objs
+        ]
+        result.extend(
+            super(ClosureQuerySet, self).bulk_create(
+                self_links, batch_size, *args, **kwargs
+            )
+        )
+
+        # 3. Build a mapping: parent id -> list of its children.
+        children_map = defaultdict(list)
+        for obj in objs:
+            if obj.tn_parent_id:
+                children_map[obj.tn_parent_id].append(obj)
+
+        # 4. Try to determine the root nodes (with tn_parent == None).
+        root_nodes = [obj for obj in objs if obj.tn_parent is None]
+
+        # If there are no root nodes, then we insert a subtree.
+        if not root_nodes:
+            # Define the "top" nodes of the subtree:
+            # those whose parent is not included in the list of inserted objects
+            objs_ids = {obj.id for obj in objs if obj.id is not None}
+            top_nodes = [
+                obj for obj in objs if obj.tn_parent_id not in objs_ids
+            ]
+
+            # For each such node, if the parent exists, get the closure records
+            # for the parent and add new records for (ancestor -> node) with
+            # depth = ancestor.depth + 1.
+            new_entries = []
+            for node in top_nodes:
+                if node.tn_parent_id:
+                    parent_closures = self.model.objects.filter(
+                        child_id=node.tn_parent_id
+                    )
+                    for ancestor in parent_closures:
+                        new_entries.append(
+                            self.model(
+                                parent=ancestor.parent,
+                                child=node,
+                                depth=ancestor.depth + 1
+                            )
+                        )
+            if new_entries:
+                result.extend(
+                    super(ClosureQuerySet, self).bulk_create(
+                        new_entries, batch_size, *args, **kwargs
+                    )
+
+                )
+
+            # Set the top-level nodes of the subtree as the starting ones for
+            # traversal.
+            current_nodes = top_nodes
+        else:
+            current_nodes = root_nodes
+
+        def process_level(current_nodes):
+            """Recursive function for traversing levels."""
+            next_level = []
+            new_entries = []
+            for node in current_nodes:
+                # For the current node, we get all the closure records
+                # (its ancestors).
+                ancestors = self.model.objects.filter(child=node)
+                for child in children_map.get(node.id, []):
+                    for ancestor in ancestors:
+                        new_entries.append(
+                            self.model(
+                                parent=ancestor.parent,
+                                child=child,
+                                depth=ancestor.depth + 1
+                            )
+                        )
+                    next_level.append(child)
+            if new_entries:
+                result.extend(
+                    super(ClosureQuerySet, self).bulk_create(
+                        new_entries, batch_size, *args, **kwargs
+                    )
+                )
+            if next_level:
+                process_level(next_level)
+
+        # 5. Run the level-by-level traversal.
+        process_level(current_nodes)
+        return result
+
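
To see what the level-by-level pass produces, here is a standalone illustration of the same rule — every ancestor row of a node is copied for its children with depth + 1, plus a depth-0 self-link — applied to a three-node chain, with plain (parent, child, depth) tuples standing in for ClosureModel rows:

    from collections import defaultdict

    parents = {1: None, 2: 1, 3: 2}         # node id -> parent id (1 is the root)
    children = defaultdict(list)
    for node, parent in parents.items():
        if parent is not None:
            children[parent].append(node)

    closure = [(n, n, 0) for n in parents]  # self-links, depth 0
    level = [n for n, p in parents.items() if p is None]
    while level:
        next_level = []
        for node in level:
            ancestors = [(p, c, d) for (p, c, d) in closure if c == node]
            for child in children[node]:
                closure += [(p, child, d + 1) for (p, c, d) in ancestors]
                next_level.append(child)
        level = next_level

    # closure now also holds (1, 2, 1), (2, 3, 1) and (1, 3, 2); ancestor and
    # descendant lookups become single filters over rows like these.
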
+    @transaction.atomic
+    def bulk_update(self, objs, fields=None, batch_size=1000):
+        """
+        Update the closure table for objects whose tn_parent has changed.
+
+        It is assumed that all objects from the objs list are already in the
+        closure table, but their links (both for parents and for children) may
+        have changed.
+
+        Algorithm:
+        1. Form a mapping: parent id → list of its children.
+        2. Determine the root nodes of the subtree to be updated:
+           – a node is considered a root if its tn_parent is None or its
+           parent is not in objs.
+        3. For each root node, if there is an external parent, get its
+           closure from the database.
+           Then form closure records for the node (all external links with
+           increased depth, plus a self-reference).
+        4. Using BFS, traverse the subtree: for each node, for each of its
+           children, create records from the parent records (depth increased
+           by 1) and add a self-reference for the child.
+        5. Remove old closure records for objects from objs and save new ones
+           in batches.
+        """
+        # 1. Topological sorting of nodes
+        objs = self.sort_nodes(objs)
+
+        # 2. Build a mapping: parent id → list of children
+        children_map = defaultdict(list)
+        for obj in objs:
+            if obj.tn_parent_id:
+                children_map[obj.tn_parent_id].append(obj)
+
+        # Set of ids of objects to be updated
+        objs_ids = {obj.id for obj in objs}
+
+        # 3. Determine the root nodes of the updated subtree:
+        # a node is considered root if its tn_parent is either None or its
+        # parent is not in objs.
+        roots = [
+            obj for obj in objs
+            if (obj.tn_parent is None) or (obj.tn_parent_id not in objs_ids)
+        ]
+
+        # List for accumulating new closure records
+        new_closure_entries = []
+
+        # Queue for BFS: each element is a tuple (node, node_closure), where
+        # node_closure is a list of closure entries for that node.
+        queue = deque()
+        for node in roots:
+            if node.tn_parent_id:
+                # Get the closure of the external parent from the database
+                external_ancestors = list(
+                    self.model.objects.filter(child_id=node.tn_parent_id)
+                    .values('parent_id', 'depth')
+                )
+                # For each ancestor found, create an entry for node with
+                # depth+1
+                node_closure = [
+                    self.model(
+                        parent_id=entry['parent_id'],
+                        child=node,
+                        depth=entry['depth'] + 1
+                    )
+                    for entry in external_ancestors
+                ]
+            else:
+                node_closure = []
+            # Add self-reference (node → node, depth 0)
+            node_closure.append(
+                self.model(parent=node, child=node, depth=0, node=node)
+            )
+
+            # Save records for the current node and put them in a queue for
+            # processing its subtree
+            new_closure_entries.extend(node_closure)
+            queue.append((node, node_closure))
+
+        # 4. BFS subtree traversal: for each node, create a closure for its
+        # children
+        while queue:
+            parent_node, parent_closure = queue.popleft()
+            for child in children_map.get(parent_node.id, []):
+                # For the child, new closure records:
+                # for each parent record, create (ancestor -> child) with
+                # depth+1
+                child_closure = [
+                    self.model(
+                        parent_id=entry.parent_id,
+                        child=child,
+                        depth=entry.depth + 1
+                    )
+                    for entry in parent_closure
+                ]
+                # Add a self-link for the child
+                child_closure.append(
+                    self.model(parent=child, child=child, depth=0)
+                )
+
+                new_closure_entries.extend(child_closure)
+                queue.append((child, child_closure))
+
+        # 5. Remove old closure records for the updated objects
+        self.model.objects.filter(child_id__in=objs_ids).delete()
+
+        # 6. Save new records in batches
+        super(ClosureQuerySet, self).bulk_create(new_closure_entries)
+
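
The rebuild performed here is easiest to see on concrete rows. Re-parenting node 3 from the sketch above so that it hangs directly under node 1 drops the old rows with child 3 and recreates them from the new parent's closure (a standalone illustration with the same tuples, not package code):

    old_rows = [(3, 3, 0), (2, 3, 1), (1, 3, 2)]   # to be deleted
    new_parent_closure = [(1, 1, 0)]               # closure of node 1
    new_rows = [(p, 3, d + 1) for (p, c, d) in new_parent_closure]
    new_rows.append((3, 3, 0))                     # self-reference, depth 0
    assert new_rows == [(1, 3, 1), (3, 3, 0)]

If node 3 had descendants, the BFS queue would extend each of these rows to them with depth + 1 in the same way.
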
+
+class ClosureModelManager(models.Manager):
+    """ClosureModel Manager."""
+
+    def get_queryset(self):
+        """get_queryset method."""
+        return ClosureQuerySet(self.model, using=self._db)
+
+    def bulk_create(self, objs, batch_size=1000):
+        """Create objects in bulk."""
+        return self.get_queryset().bulk_create(objs, batch_size=batch_size)
+
+    def bulk_update(self, objs, fields=None, batch_size=1000):
+        """Move nodes in ClosureModel."""
+        return self.get_queryset().bulk_update(
+            objs, fields, batch_size=batch_size
+        )
treenode/models/__init__.py
CHANGED
treenode/models/adjacency.py
ADDED
@@ -0,0 +1,343 @@
+# -*- coding: utf-8 -*-
+"""
+TreeNode Proxy Model
+
+This module defines an abstract base model `TreeNodeModel` that
+implements hierarchical data storage using the Adjacency Table method.
+It integrates with a Closure Table for optimized tree operations.
+
+Features:
+- Supports Adjacency List representation with parent-child relationships.
+- Integrates with a Closure Table for efficient ancestor and descendant
+  queries.
+- Provides a caching mechanism for performance optimization.
+- Includes methods for tree traversal, manipulation, and serialization.
+
+Version: 2.1.0
+Author: Timur Kady
+Email: timurkady@yandex.com
+"""
+
+from django.db import models, transaction
+from django.db.models.signals import pre_save, post_save
+from django.utils.translation import gettext_lazy as _
+
+from .factory import TreeFactory
+import treenode.models.mixins as mx
+from ..managers import TreeNodeModelManager
+from ..cache import treenode_cache, cached_method
+from ..signals import disable_signals
+from ..utils.base36 import to_base36
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class TreeNodeModel(
+        mx.TreeNodeAncestorsMixin, mx.TreeNodeChildrenMixin,
+        mx.TreeNodeFamilyMixin, mx.TreeNodeDescendantsMixin,
+        mx.TreeNodeLogicalMixin, mx.TreeNodeNodeMixin,
+        mx.TreeNodePropertiesMixin, mx.TreeNodeRootsMixin,
+        mx.TreeNodeSiblingsMixin, mx.TreeNodeTreeMixin,
+        models.Model, metaclass=TreeFactory):
+    """
+    Abstract TreeNode Model.
+
+    Implements hierarchy storage using the Adjacency Table method.
+    To increase performance, it has an additional attribute - a model
+    that stores data from the Adjacency Table in the form of
+    a Closure Table.
+    """
+
+    treenode_display_field = None
+    treenode_sort_field = None  # not used yet
+    closure_model = None
+
+    tn_parent = models.ForeignKey(
+        'self',
+        related_name='tn_children',
+        on_delete=models.CASCADE,
+        null=True,
+        blank=True,
+        verbose_name=_('Parent')
+    )
+
+    tn_priority = models.PositiveIntegerField(
+        default=0,
+        verbose_name=_('Priority')
+    )
+
+    objects = TreeNodeModelManager()
+
+    class Meta:
+        """Meta Class."""
+
+        abstract = True
+        indexes = [
+            models.Index(fields=["tn_parent"]),
+            models.Index(fields=["tn_parent", "id"]),
+            models.Index(fields=["tn_parent", "tn_priority"]),
+        ]
+
+    def __str__(self):
+        """Display information about a class object."""
+        if self.treenode_display_field:
+            return str(getattr(self, self.treenode_display_field))
+        else:
+            return 'Node %d' % self.pk
+
+    # ---------------------------------------------------
+    # Public methods
+    # ---------------------------------------------------
+
+    @classmethod
+    def clear_cache(cls):
+        """Clear cache for this model only."""
+        treenode_cache.invalidate(cls._meta.label)
+
+    @classmethod
+    def get_closure_model(cls):
+        """Return ClosureModel for class."""
+        return cls.closure_model
+
+    def delete(self, cascade=True):
+        """Delete node."""
+        model = self._meta.model
+        parent = self.get_parent()
+
+        if not cascade:
+            new_siblings_count = parent.get_siblings_count()
+            # Get a list of children
+            children = self.get_children()
+            if children:
+                # Move them one level up
+                for child in children:
+                    child.tn_parent = self.tn_parent
+                    child.tn_priority = new_siblings_count + child.tn_priority
+                # Update both models in bulk
+                model.objects.bulk_update(
+                    children,
+                    ("tn_parent",),
+                    batch_size=1000
+                )
+
+        # All descendants and related records in the ClosureModel will be
+        # cleared by cascading the removal of ForeignKeys.
+        super().delete()
+        # Can be excluded. The cache has already been cleared by the manager.
+        model.clear_cache()
+
+        # Update tn_priority
+        if parent is None:
+            siblings = model.get_roots()
+        else:
+            siblings = parent.get_children()
+
+        if siblings:
+            siblings = [node for node in siblings if node.pk != self.pk]
+            sorted_siblings = sorted(siblings, key=lambda x: x.tn_priority)
+            for index, node in enumerate(sorted_siblings):
+                node.tn_priority = index
+            model.objects.bulk_update(siblings, ['tn_priority'])
+
+    def save(self, force_insert=False, *args, **kwargs):
+        """Save a model instance and keep the closure table in sync."""
+        model = self._meta.model
+        # Send signal pre_save
+        pre_save.send(
+            sender=model,
+            instance=self,
+            raw=False,
+            using=self._state.db,
+            update_fields=kwargs.get("update_fields", None)
+        )
+
+        # If the object already exists, get the old parent and priority values
+        is_new = self.pk is None
+        if not is_new:
+            old_parent, old_priority = model.objects\
+                .filter(pk=self.pk)\
+                .values_list('tn_parent', 'tn_priority')\
+                .first()
+            is_move = (old_priority != self.tn_priority)
+        else:
+            force_insert = True
+            is_move = False
+            old_parent = None
+
+        # Check if we are trying to move a node under one of its own children
+        if old_parent and old_parent != self.tn_parent and self.tn_parent:
+            # Get pks of children via values_list to avoid creating a full
+            # set of objects
+            if self.tn_parent.pk in self.get_descendants_pks():
+                raise ValueError("You cannot move a node into its own child.")
+
+        # Save the object and synchronize with the closure table
+        # Disable signals
+        with (disable_signals(pre_save, model),
+              disable_signals(post_save, model)):
+
+            if is_new or is_move:
+                self._update_priority()
+            super().save(force_insert=force_insert, *args, **kwargs)
+            # Run synchronization
+            if is_new:
+                self.closure_model.insert_node(self)
+            elif is_move:
+                subtree_nodes = self.get_descendants(include_self=True)
+                self.closure_model.move_node(subtree_nodes)
+            # Update priorities among neighbors or clear cache if there was
+            # no movement
+
+        # Clear model cache
+        model.clear_cache()
+        # Send signal post_save
+        post_save.send(sender=model, instance=self, created=is_new)
+
+    # ---------------------------------------------------
+    # Private methods
+    #
+    # The usage of these methods is only allowed by developers. In future
+    # versions, these methods may be changed or removed without any warning.
+    # ---------------------------------------------------
+
+    def _update_priority(self):
+        """Update the tn_priority field for siblings."""
+        siblings = self.get_siblings()
+        siblings = sorted(siblings, key=lambda x: x.tn_priority)
+        insert_pos = min(self.tn_priority, len(siblings))
+        siblings.insert(insert_pos, self)
+        for index, node in enumerate(siblings):
+            node.tn_priority = index
+        siblings = [s for s in siblings if s.tn_priority != self.tn_priority]
+
+        # Save changes
+        model = self._meta.model
+        model.objects.bulk_update(siblings, ['tn_priority'])
+        model.clear_cache()
+
+    @classmethod
+    def _get_place(cls, target, position=0):
+        """
+        Get position relative to the target node.
+
+        position – the position, relative to the target node, where the
+        current node object will be moved to; it can be one of:
+
+        - first-root: the node will be the first root node;
+        - last-root: the node will be the last root node;
+        - sorted-root: the new node will be moved after sorting by
+          the treenode_sort_field field;
+
+        - first-sibling: the node will be the new leftmost sibling of the
+          target node;
+        - left-sibling: the node will take the target node's place, and the
+          target (with the nodes that follow it) will be shifted right;
+        - right-sibling: the node will be moved to the position after the
+          target node;
+        - last-sibling: the node will be the new rightmost sibling of the
+          target node;
+        - sorted-sibling: the new node will be moved after sorting by
+          the treenode_sort_field field;
+
+        - first-child: the node will be the first child of the target node;
+        - last-child: the node will be the new rightmost child of the target node;
+        - sorted-child: the new node will be moved after sorting by
+          the treenode_sort_field field.
+
+        """
+        if isinstance(position, int):
+            priority = position
+        elif not isinstance(position, str) or '-' not in position:
+            raise ValueError(f"Invalid position format: {position}")
+
+        part1, part2 = position.split('-')
+        if part1 not in {'first', 'last', 'left', 'right', 'sorted'} or \
+                part2 not in {'root', 'child', 'sibling'}:
+            raise ValueError(f"Unknown position type: {position}")
+
+        # Determine the parent depending on the type of position
+        if part2 == 'root':
+            parent = None
+        elif part2 == 'sibling':
+            parent = target.tn_parent
+        elif part2 == 'child':
+            parent = target
+        else:
+            parent = None
+
+        if parent:
+            count = parent.get_children_count()
+        else:
+            count = cls.get_roots_count()
+
+        # Determine the position (priority)
+        if part1 == 'first':
+            priority = 0
+        elif part1 == 'left':
+            priority = target.tn_priority
+        elif part1 == 'right':
+            priority = target.tn_priority + 1
+        elif part1 in {'last', 'sorted'}:
+            priority = count
+        else:
+            priority = count
+
+        return parent, priority
+
+    @classmethod
+    @cached_method
+    def _sort_node_list(cls, nodes):
+        """
+        Sort a list of nodes by materialized path order.
+
+        Collect the materialized path without accessing the DB and perform
+        sorting
+        """
+        # Create a list of tuples: (node, materialized_path)
+        nodes_with_path = [(node, node.tn_order) for node in nodes]
+        # Sort the list by the materialized path
+        nodes_with_path.sort(key=lambda tup: tup[1])
+        # Extract sorted nodes
+        return [tup[0] for tup in nodes_with_path]
+
+    @classmethod
+    @cached_method
+    def _get_sorting_map(self, model):
+        """Return the sorting map of model objects."""
+        # -- 1. Extract data from the model
+        qs_list = model.objects.values_list('pk', 'tn_parent', 'tn_priority')
+        node_map = {pk: {"pk": pk, "parent": tn_parent, "priority": tn_priority}
+                    for pk, tn_parent, tn_priority in qs_list}
+
+        def build_path(node_id):
+            """Recursive path construction."""
+            path = []
+            while node_id:
+                node = node_map.get(node_id)
+                if not node:
+                    break
+                path.append(node["priority"])
+                node_id = node["parent"]
+            return list(reversed(path))
+
+        # -- 2. Collect materialized paths
+        paths = []
+        for pk, node in node_map.items():
+            path = build_path(pk)
+            paths.append({"pk": pk, "path": path})
+
+        # -- 3. Convert paths to strings
+        for item in paths:
+            pk_path = item["path"]
+            segments = [to_base36(i).rjust(6, '0') for i in pk_path]
+            item["path_str"] = "".join(segments)
+
+        # -- 4. Sort by the string representation of the path
+        paths.sort(key=lambda x: x["path_str"])
+        index_map = {i: item["pk"] for i, item in enumerate(paths)}
+
+        return index_map
+
+
+# The end
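
Two short, self-contained sketches may help when reading the model above. First, a minimal concrete subclass: the Category model, its name field and the import path are illustrative assumptions (the file list above shows the TreeNodeModel machinery living under treenode/models), not part of the diff.

    from django.db import models
    from treenode.models import TreeNodeModel  # assumed re-export

    class Category(TreeNodeModel):
        name = models.CharField(max_length=255)
        treenode_display_field = "name"  # used by __str__() above

    # Typical use, assuming the model is migrated:
    #   root = Category.objects.create(name="Root")
    #   child = Category(name="Child", tn_parent=root)
    #   child.save()   # keeps the closure table in sync; moving a node under
    #                  # its own descendant raises ValueError (see save())

Second, _get_sorting_map() converts each node's chain of tn_priority values into fixed-width base36 segments so that plain string comparison reproduces tree order. A standalone sketch of that encoding (the local to_base36 helper is a stand-in for treenode.utils.base36.to_base36 and assumes the usual 0-9a-z digit set):

    def to_base36(n):
        digits = "0123456789abcdefghijklmnopqrstuvwxyz"
        out = ""
        while n:
            n, r = divmod(n, 36)
            out = digits[r] + out
        return out or "0"

    path = [1, 10]  # e.g. the 2nd root, then its 11th child (priorities 1 and 10)
    path_str = "".join(to_base36(i).rjust(6, "0") for i in path)
    assert path_str == "00000100000a"  # lexicographic order == tree order
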