infrahub-server 1.4.0b0-py3-none-any.whl → 1.4.0b1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- infrahub/core/graph/__init__.py +1 -1
- infrahub/core/migrations/graph/__init__.py +2 -0
- infrahub/core/migrations/graph/m036_index_attr_vals.py +577 -0
- {infrahub_server-1.4.0b0.dist-info → infrahub_server-1.4.0b1.dist-info}/METADATA +2 -1
- {infrahub_server-1.4.0b0.dist-info → infrahub_server-1.4.0b1.dist-info}/RECORD +8 -7
- {infrahub_server-1.4.0b0.dist-info → infrahub_server-1.4.0b1.dist-info}/LICENSE.txt +0 -0
- {infrahub_server-1.4.0b0.dist-info → infrahub_server-1.4.0b1.dist-info}/WHEEL +0 -0
- {infrahub_server-1.4.0b0.dist-info → infrahub_server-1.4.0b1.dist-info}/entry_points.txt +0 -0
infrahub/core/graph/__init__.py
CHANGED
@@ -1 +1 @@
-GRAPH_VERSION = 35
+GRAPH_VERSION = 36
infrahub/core/migrations/graph/__init__.py
CHANGED
@@ -37,6 +37,7 @@ from .m032_cleanup_orphaned_branch_relationships import Migration032
 from .m033_deduplicate_relationship_vertices import Migration033
 from .m034_find_orphaned_schema_fields import Migration034
 from .m035_drop_attr_value_index import Migration035
+from .m036_index_attr_vals import Migration036

 if TYPE_CHECKING:
     from infrahub.core.root import Root
@@ -79,6 +80,7 @@ MIGRATIONS: list[type[GraphMigration | InternalSchemaMigration | ArbitraryMigrat
     Migration033,
     Migration034,
     Migration035,
+    Migration036,
 ]

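Note: the two hunks above show the pattern this package uses to ship a new graph migration: bump GRAPH_VERSION and register the new class in MIGRATIONS. As a rough illustration of how such a registry is typically consumed, the sketch below walks the list and applies only the migrations a database still needs. It is a hypothetical sketch, not Infrahub's actual runner: the run_pending_migrations function, the no-argument instantiation, and the interpretation of minimum_version are assumptions, and it presumes every registered class exposes the same validate_migration/execute interface that Migration036 does.

    # Hypothetical sketch only -- not part of the package. Illustrates how a registry
    # like MIGRATIONS plus GRAPH_VERSION is commonly used to apply pending migrations.
    from infrahub.core.graph import GRAPH_VERSION
    from infrahub.core.migrations.graph import MIGRATIONS
    from infrahub.database import InfrahubDatabase


    async def run_pending_migrations(db: InfrahubDatabase, current_version: int) -> int:
        for migration_class in MIGRATIONS:
            migration = migration_class()  # assumption: migrations construct without arguments
            # Assumed meaning of minimum_version: the graph version this migration upgrades from,
            # so anything below the current version has already been applied.
            if migration.minimum_version < current_version:
                continue
            await migration.validate_migration(db=db)
            await migration.execute(db=db)
            current_version = migration.minimum_version + 1
        # After a full run the graph should be at the version declared by the package.
        if current_version != GRAPH_VERSION:
            raise RuntimeError(f"expected graph version {GRAPH_VERSION}, ended at {current_version}")
        return current_version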
infrahub/core/migrations/graph/m036_index_attr_vals.py
ADDED
@@ -0,0 +1,577 @@
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Any
+
+from rich.console import Console
+
+from infrahub.constants.database import IndexType
+from infrahub.core.attribute import MAX_STRING_LENGTH
+from infrahub.core.migrations.shared import MigrationResult
+from infrahub.core.query import Query, QueryType
+from infrahub.core.timestamp import Timestamp
+from infrahub.database.index import IndexItem
+from infrahub.database.neo4j import IndexManagerNeo4j
+from infrahub.log import get_logger
+
+from ..shared import ArbitraryMigration
+
+if TYPE_CHECKING:
+    from infrahub.database import InfrahubDatabase
+
+log = get_logger()
+
+
+AV_INDEXED_INDEX = IndexItem(
+    name="attr_value_indexed", label="AttributeValueIndexed", properties=["value"], type=IndexType.RANGE
+)
+
+
+@dataclass
+class SchemaAttributeTimeframe:
+    kind: str
+    attr_name: str
+    branch: str
+    branch_level: int
+    branched_from: str
+    from_time: str
+    is_default_branch: bool
+    is_large_type: bool
+
+
+class GetLargeAttributeTypesQuery(Query):
+    """For every active attribute on every branch, return a SchemaAttributeTimeframe object"""
+
+    name = "get_large_attribute_types_query"
+    type = QueryType.READ
+    insert_return = False
+
+    async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:  # noqa: ARG002
+        query = """
+        MATCH (branch:Branch)
+        // --------------
+        // find all SchemaAttributes with a LARGE_ATTRIBUTE_TYPE kind for each branch
+        // --------------
+        MATCH (schema_attr:SchemaAttribute)-[r1:HAS_ATTRIBUTE]->(attr_kind:Attribute {name: "kind"})-[r2:HAS_VALUE]->(attr_kind_value)
+        WHERE r1.status = "active" and r1.to IS NULL AND r2.status = "active" and r2.to IS NULL
+        WITH DISTINCT branch, schema_attr, attr_kind
+        CALL (schema_attr, attr_kind, branch) {
+            MATCH (schema_attr)-[has_attr:HAS_ATTRIBUTE]->(attr_kind)-[has_value:HAS_VALUE]->(attr_kind_value)
+            WHERE has_attr.status = "active"
+            AND has_value.status = "active"
+            AND has_attr.to IS NULL
+            AND has_value.to IS NULL
+            AND (
+                has_attr.branch = branch.name
+                OR (has_attr.branch_level < branch.hierarchy_level AND has_attr.from <= branch.branched_from)
+            )
+            AND (
+                has_value.branch = branch.name
+                OR (has_value.branch_level < branch.hierarchy_level AND has_value.from <= branch.branched_from)
+            )
+            WITH has_value.from AS from_time, attr_kind_value.value AS attr_type
+            ORDER BY has_value.branch_level DESC, has_value.from DESC
+            LIMIT 1
+            WITH from_time, attr_type
+            RETURN from_time, attr_type IN ["JSON", "List", "TextArea"] AS is_large_type
+        }
+        CALL (schema_attr, branch) {
+            // --------------
+            // get the attribute name
+            // --------------
+            MATCH (schema_attr)-[r1:HAS_ATTRIBUTE]->(:Attribute {name: "name"})-[r2:HAS_VALUE]->(name_value)
+            WHERE r1.status = "active"
+            AND r1.to IS NULL
+            AND (
+                r1.branch = branch.name
+                OR (r1.branch_level < branch.hierarchy_level AND r1.from <= branch.branched_from)
+            )
+            AND r2.status = "active"
+            AND r2.to IS NULL
+            AND (
+                r2.branch = branch.name
+                OR (r2.branch_level < branch.hierarchy_level AND r2.from <= branch.branched_from)
+            )
+            WITH name_value.value AS attr_name
+            ORDER BY r2.branch_level DESC, r1.branch_level DESC, r2.from DESC, r1.from DESC
+            LIMIT 1
+
+            // --------------
+            // get the the schema node/generic
+            // --------------
+            MATCH (schema_attr)-[r1:IS_RELATED]-(:Relationship {name: "schema__node__attributes"})-[r2:IS_RELATED]-(schema_node:SchemaNode|SchemaGeneric)
+            WHERE r1.status = "active"
+            AND r1.to IS NULL
+            AND (
+                r1.branch = branch.name
+                OR (r1.branch_level < branch.hierarchy_level AND r1.from <= branch.branched_from)
+            )
+            AND r2.status = "active"
+            AND r2.to IS NULL
+            AND (
+                r2.branch = branch.name
+                OR (r2.branch_level < branch.hierarchy_level AND r2.from <= branch.branched_from)
+            )
+            WITH attr_name, schema_node
+            ORDER BY r2.branch_level DESC, r1.branch_level DESC, r2.from DESC, r1.from DESC
+            LIMIT 1
+
+            // --------------
+            // find the namespace for this SchemaNode/SchemaGeneric
+            // --------------
+            MATCH (schema_node)-[r1:HAS_ATTRIBUTE]->(:Attribute {name: "namespace"})-[r2:HAS_VALUE]->(kind_namespace_value)
+            WHERE r1.status = "active"
+            AND r1.to IS NULL
+            AND (
+                r1.branch = branch.name
+                OR (r1.branch_level < branch.hierarchy_level AND r1.from <= branch.branched_from)
+            )
+            AND r2.status = "active"
+            AND r2.to IS NULL
+            AND (
+                r2.branch = branch.name
+                OR (r2.branch_level < branch.hierarchy_level AND r2.from <= branch.branched_from)
+            )
+            WITH attr_name, schema_node, kind_namespace_value.value AS kind_namespace
+            ORDER BY r2.branch_level DESC, r1.branch_level DESC, r2.from DESC, r1.from DESC
+            LIMIT 1
+
+            // --------------
+            // find the name for this SchemaNode/SchemaGeneric
+            // --------------
+            MATCH (schema_node)-[r1:HAS_ATTRIBUTE]->(:Attribute {name: "name"})-[r2:HAS_VALUE]->(kind_name_value)
+            WHERE r1.status = "active"
+            AND r1.to IS NULL
+            AND (
+                r1.branch = branch.name
+                OR (r1.branch_level < branch.hierarchy_level AND r1.from <= branch.branched_from)
+            )
+            AND r2.status = "active"
+            AND r2.to IS NULL
+            AND (
+                r2.branch = branch.name
+                OR (r2.branch_level < branch.hierarchy_level AND r2.from <= branch.branched_from)
+            )
+            WITH attr_name, kind_namespace, kind_name_value.value AS kind_name
+            ORDER BY r2.branch_level DESC, r1.branch_level DESC, r2.from DESC, r1.from DESC
+            LIMIT 1
+            RETURN attr_name, kind_namespace, kind_name
+        }
+        RETURN
+            kind_namespace,
+            kind_name,
+            attr_name,
+            branch.name AS branch,
+            branch.hierarchy_level AS branch_level,
+            branch.branched_from AS branched_from,
+            branch.is_default AS is_default_branch,
+            from_time,
+            is_large_type
+        """
+        self.add_to_query(query)
+        self.return_labels = [
+            "kind_namespace",
+            "kind_name",
+            "attr_name",
+            "branch",
+            "branch_level",
+            "branched_from",
+            "is_default_branch",
+            "from_time",
+            "is_large_type",
+        ]
+
+    def get_large_attribute_type_timeframes(self) -> list[SchemaAttributeTimeframe]:
+        schema_attribute_timeframes: list[SchemaAttributeTimeframe] = []
+        for result in self.get_results():
+            kind_namespace = result.get_as_type("kind_namespace", return_type=str)
+            kind_name = result.get_as_type("kind_name", return_type=str)
+            attr_name = result.get_as_type("attr_name", return_type=str)
+            branch = result.get_as_type("branch", return_type=str)
+            branch_level = result.get_as_type("branch_level", return_type=int)
+            branched_from = result.get_as_type("branched_from", return_type=str)
+            is_default_branch = result.get_as_type("is_default_branch", return_type=bool)
+            is_large_type = result.get_as_type("is_large_type", return_type=bool)
+            from_time = result.get_as_type("from_time", return_type=str)
+            kind = f"{kind_namespace}{kind_name}"
+            schema_attribute_timeframes.append(
+                SchemaAttributeTimeframe(
+                    kind=kind,
+                    attr_name=attr_name,
+                    branch=branch,
+                    branch_level=branch_level,
+                    branched_from=branched_from,
+                    from_time=from_time,
+                    is_default_branch=is_default_branch,
+                    is_large_type=is_large_type,
+                )
+            )
+        return schema_attribute_timeframes
+
+
+class DeIndexLargeAttributeValuesQuery(Query):
+    name = "de_index_large_attribute_values_query"
+    type = QueryType.WRITE
+    insert_return = False
+
+    def __init__(self, max_value_size: int, **kwargs: Any) -> None:
+        self.max_value_size = max_value_size
+        super().__init__(**kwargs)
+
+    async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:  # noqa: ARG002
+        self.params["max_value_size"] = self.max_value_size
+        query = """
+        MATCH (av:AttributeValueIndexed)
+        WHERE size(toString(av.value)) > $max_value_size
+        REMOVE av:AttributeValueIndexed
+        """
+        self.add_to_query(query)
+
+
+class CreateNonIndexedAttributeValueQuery(Query):
+    name = "create_non_indexed_attribute_value_query"
+    type = QueryType.WRITE
+    insert_return = False
+
+    def __init__(
+        self,
+        schema_attribute_timeframe: SchemaAttributeTimeframe,
+        **kwargs: Any,
+    ) -> None:
+        super().__init__(**kwargs)
+        self.schema_attribute_timeframe = schema_attribute_timeframe
+
+    async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:  # noqa: ARG002
+        self.params.update(
+            {
+                "attribute_name": self.schema_attribute_timeframe.attr_name,
+                "branch": self.schema_attribute_timeframe.branch,
+                "branch_level": self.schema_attribute_timeframe.branch_level,
+                "branched_from": self.schema_attribute_timeframe.branched_from,
+                "from_time": self.schema_attribute_timeframe.from_time,
+            }
+        )
+        query = """
+        MATCH (node:Node:%(schema_kind)s)
+        CALL (node) {
+            MATCH (node)-[has_attr_e:HAS_ATTRIBUTE]->(attr:Attribute)
+            WHERE attr.name = $attribute_name
+            AND (
+                has_attr_e.branch = $branch
+                OR (has_attr_e.branch_level < $branch_level AND has_attr_e.from <= $branched_from)
+            )
+            AND has_attr_e.status = "active"
+            AND has_attr_e.to IS NULL
+            WITH attr
+            ORDER BY has_attr_e.branch_level DESC, has_attr_e.from DESC
+            LIMIT 1
+
+            // --------------
+            // identify the active HAS_VALUE edges that we need to consider
+            // --------------
+            MATCH (attr)-[has_val_e:HAS_VALUE]->(av)
+            WHERE (
+                has_val_e.branch = $branch
+                OR (has_val_e.branch_level < $branch_level AND has_val_e.from <= $branched_from)
+            )
+            AND has_val_e.status = "active"
+            AND has_val_e.to IS NULL
+            RETURN attr, has_val_e, av
+            ORDER BY has_val_e.branch_level DESC, has_val_e.from DESC
+            LIMIT 1
+        }
+        // --------------------
+        // determine the timestamp to de-index the AttributeValue
+        // --------------------
+        WITH attr, has_val_e, av,
+        CASE
+            WHEN $from_time <= has_val_e.from THEN has_val_e.from
+            ELSE $from_time
+        END AS non_indexed_from
+
+        // --------------------
+        // create the new edge to the AttributeValueNonIndexed vertex, if necessary
+        // --------------------
+        WITH attr, has_val_e, av, non_indexed_from
+        CALL (attr, has_val_e, av, non_indexed_from) {
+            WITH has_val_e
+            WHERE NOT "AttributeValueNonIndexed" IN labels(av)
+
+            MERGE (av_no_index:AttributeValueNonIndexed {value: av.value, is_default: av.is_default})
+            LIMIT 1
+
+            CREATE (attr)-[add_no_index_on_branch:HAS_VALUE]->(av_no_index)
+            SET add_no_index_on_branch = properties(has_val_e)
+            SET
+                add_no_index_on_branch.branch = $branch,
+                add_no_index_on_branch.branch_level = $branch_level,
+                add_no_index_on_branch.from = non_indexed_from,
+                add_no_index_on_branch.status = "active",
+                add_no_index_on_branch.to = NULL
+
+            // --------------------
+            // delete existing active edge if it is on this branch and we created a new edge
+            // --------------------
+            WITH has_val_e
+            WHERE has_val_e.branch = $branch
+            DELETE has_val_e
+
+        }
+        """ % {"schema_kind": self.schema_attribute_timeframe.kind}
+        self.add_to_query(query)
+
+
+class RevertNonIndexOnBranchQuery(Query):
+    name = "revert_non_index_on_branch_query"
+    type = QueryType.WRITE
+    insert_return = False
+
+    def __init__(
+        self,
+        schema_attribute_timeframe: SchemaAttributeTimeframe,
+        **kwargs: Any,
+    ) -> None:
+        super().__init__(**kwargs)
+        self.schema_attribute_timeframe = schema_attribute_timeframe
+
+    async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:  # noqa: ARG002
+        self.params.update(
+            {
+                "attribute_name": self.schema_attribute_timeframe.attr_name,
+                "branch": self.schema_attribute_timeframe.branch,
+                "branch_level": self.schema_attribute_timeframe.branch_level,
+                "branched_from": self.schema_attribute_timeframe.branched_from,
+                "from_time": self.schema_attribute_timeframe.from_time,
+            }
+        )
+        query = """
+        MATCH (node:Node:%(schema_kind)s)
+        CALL (node) {
+            MATCH (node)-[has_attr_e:HAS_ATTRIBUTE]->(attr:Attribute)
+            WHERE attr.name = $attribute_name
+            AND (
+                has_attr_e.branch = $branch
+                OR (has_attr_e.branch_level < $branch_level AND has_attr_e.from <= $branched_from)
+            )
+            AND has_attr_e.status = "active"
+            AND has_attr_e.to IS NULL
+            WITH attr
+            ORDER BY has_attr_e.branch_level DESC, has_attr_e.from DESC
+            LIMIT 1
+
+            // --------------
+            // identify the active HAS_VALUE edges that we need to consider
+            // --------------
+            MATCH (attr)-[has_val_e:HAS_VALUE]->(av)
+            WHERE (
+                has_val_e.branch = $branch
+                OR (has_val_e.branch_level < $branch_level AND has_val_e.from <= $branched_from)
+            )
+            AND has_val_e.status = "active"
+            AND has_val_e.to IS NULL
+            RETURN attr, has_val_e, av
+            ORDER BY has_val_e.branch_level DESC, has_val_e.from DESC
+            LIMIT 1
+        }
+
+        // --------------------
+        // determine the timestamp to index the AttributeValue
+        // --------------------
+        WITH attr, has_val_e, av,
+        CASE
+            WHEN $from_time <= has_val_e.from THEN has_val_e.from
+            ELSE $from_time
+        END AS indexed_from
+
+        // --------------------
+        // create the new edge to the AttributeValue vertex
+        // --------------------
+        WITH attr, has_val_e, av, indexed_from
+        CALL (attr, has_val_e, av, indexed_from) {
+            WITH has_val_e
+            WHERE NOT "AttributeValue" IN labels(av)
+
+            MERGE (av_index:AttributeValue {value: av.value, is_default: av.is_default})
+            LIMIT 1
+
+            CREATE (attr)-[add_index_on_branch:HAS_VALUE]->(av_index)
+            SET add_index_on_branch = properties(has_val_e)
+            SET
+                add_index_on_branch.branch = $branch,
+                add_index_on_branch.branch_level = $branch_level,
+                add_index_on_branch.from = indexed_from,
+                add_index_on_branch.status = "active",
+                add_index_on_branch.to = NULL
+
+            // --------------------
+            // delete existing active edge if it is on this branch and we created a new edge
+            // --------------------
+            WITH has_val_e
+            WHERE has_val_e.branch = $branch AND add_index_on_branch IS NOT NULL
+            DELETE has_val_e
+        }
+        """ % {"schema_kind": self.schema_attribute_timeframe.kind}
+        self.add_to_query(query)
+
+
+class SetAttributeValueIndexedQuery(Query):
+    name = "set_attribute_value_indexed_query"
+    type = QueryType.WRITE
+    insert_return = False
+
+    async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:  # noqa: ARG002
+        query = """
+        MATCH (av:AttributeValue)
+        SET av:AttributeValueIndexed
+        """
+        self.add_to_query(query)
+
+
+class FinalizeAttributeValueNonIndexedQuery(Query):
+    name = "finalize_attribute_value_non_indexed_query"
+    type = QueryType.WRITE
+    insert_return = False
+
+    async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:  # noqa: ARG002
+        query = """
+        MATCH (av_no_index:AttributeValueNonIndexed)
+        SET av_no_index:AttributeValue
+        REMOVE av_no_index:AttributeValueNonIndexed
+        """
+        self.add_to_query(query)
+
+
+class Migration036(ArbitraryMigration):
+    """
+    Update AttributeValue vertices to be AttributeValueIndexed, unless they include values for LARGE_ATTRIBUTE_TYPES
+
+    0. Drop the index on the AttributeValueIndexed vertex, there are no AttributeValueIndexed vertices at this point anyway
+    1. For all attributes of all schema on all branches, determine if the attribute is a LARGE_ATTRIBUTE_TYPE and when
+        attribute's kind was last updated in the schema
+    2. For all branches, starting with the default and global branches, update HAS_VALUE edges for LARGE_ATTRIBUTE_TYPE
+        attributes to point to AttributeValueNonIndexed vertices
+    3. For any LARGE_ATTRIBUTE_TYPE attributes on the default branch that were updated to non-large_type on other branches,
+        revert the HAS_VALUE edges to point to AttributeValue vertices
+    4. Add the AttributeValueIndexed label to all AttributeValue vertices
+    5. Update all AttributeValueNonIndexed vertices to AttributeValue (no AttributeValueIndexed label)
+    6. Any AttributeValueIndexed vertices with a value of size greater than MAX_STRING_LENGTH are changed to AttributeValueNonIndexed
+    7. Add the index on AttributeValueIndexed again
+    """
+
+    name: str = "036_index_attr_vals"
+    minimum_version: int = 35
+
+    async def validate_migration(self, db: InfrahubDatabase) -> MigrationResult:  # noqa: ARG002
+        result = MigrationResult()
+
+        return result
+
+    async def execute(self, db: InfrahubDatabase) -> MigrationResult:  # noqa: PLR0915
+        console = Console()
+        result = MigrationResult()
+
+        # find the active schema attributes that have a LARGE_ATTRIBUTE_TYPE kind on all branches
+        console.print(
+            f"{Timestamp().to_string()} Determining schema attribute types and timestamps on all branches...", end=""
+        )
+        get_large_attribute_types_query = await GetLargeAttributeTypesQuery.init(db=db)
+        await get_large_attribute_types_query.execute(db=db)
+        schema_attribute_timeframes = get_large_attribute_types_query.get_large_attribute_type_timeframes()
+        console.print("done")
+
+        # find which schema attributes are large_types in the default branch, but updated to non-large_type on other branches
+        # {(kind, attr_name): SchemaAttributeTimeframe}
+        console.print(
+            f"{Timestamp().to_string()} Determining which schema attributes have been updated to non-large_type on non-default branches...",
+            end="",
+        )
+        main_schema_attribute_timeframes_map: dict[tuple[str, str], SchemaAttributeTimeframe] = {}
+        for schema_attr_time in schema_attribute_timeframes:
+            if schema_attr_time.is_default_branch:
+                main_schema_attribute_timeframes_map[schema_attr_time.kind, schema_attr_time.attr_name] = (
+                    schema_attr_time
+                )
+        large_type_reverts: list[SchemaAttributeTimeframe] = []
+        for schema_attr_time in schema_attribute_timeframes:
+            if schema_attr_time.is_default_branch or schema_attr_time.is_large_type:
+                continue
+            default_schema_attr_time = main_schema_attribute_timeframes_map.get(
+                (schema_attr_time.kind, schema_attr_time.attr_name)
+            )
+            if not default_schema_attr_time:
+                continue
+            if (
+                default_schema_attr_time.is_large_type
+                and default_schema_attr_time.from_time < schema_attr_time.branched_from
+            ):
+                large_type_reverts.append(schema_attr_time)
+        console.print("done")
+
+        # drop the index on the AttributeValueNonIndexed vertex, there won't be any at this point anyway
+        console.print(f"{Timestamp().to_string()} Dropping index on AttributeValueIndexed vertices...", end="")
+        index_manager = IndexManagerNeo4j(db=db)
+        index_manager.init(nodes=[AV_INDEXED_INDEX], rels=[])
+        await index_manager.drop()
+        console.print("done")
+
+        # create the temporary non-indexed attribute value vertices for LARGE_ATTRIBUTE_TYPE attributes
+        # start with default branch
+        console.print(f"{Timestamp().to_string()} Update non-indexed attribute values with temporary label...", end="")
+        large_schema_attribute_timeframes = [
+            schema_attr_time for schema_attr_time in schema_attribute_timeframes if schema_attr_time.is_large_type
+        ]
+        for schema_attr_time in sorted(large_schema_attribute_timeframes, key=lambda x: x.branch_level):
+            create_non_indexed_attribute_value_query = await CreateNonIndexedAttributeValueQuery.init(
+                db=db, schema_attribute_timeframe=schema_attr_time
+            )
+            await create_non_indexed_attribute_value_query.execute(db=db)
+        console.print("done")
+
+        # re-index attribute values on branches where the type was updated to non-large_type
+        console.print(
+            f"{Timestamp().to_string()} Indexing attribute values on branches where the attribute schema was updated to a non-large_type...",
+            end="",
+        )
+        for schema_attr_time in large_type_reverts:
+            revert_non_index_on_branch_query = await RevertNonIndexOnBranchQuery.init(
+                db=db, schema_attribute_timeframe=schema_attr_time
+            )
+            await revert_non_index_on_branch_query.execute(db=db)
+        console.print("done")
+
+        # set the AttributeValue vertices to be AttributeValueIndexed
+        console.print(
+            f"{Timestamp().to_string()} Update all AttributeValue vertices to add the AttributeValueIndexed label...",
+            end="",
+        )
+        set_attribute_value_indexed_query = await SetAttributeValueIndexedQuery.init(db=db)
+        await set_attribute_value_indexed_query.execute(db=db)
+        console.print("done")
+
+        # set AttributeValueNonIndexed vertices to just AttributeValue
+        console.print(
+            f"{Timestamp().to_string()} Update all AttributeValueNonIndexed vertices to be AttributeValue (no index)...",
+            end="",
+        )
+        finalize_attribute_value_non_indexed_query = await FinalizeAttributeValueNonIndexedQuery.init(db=db)
+        await finalize_attribute_value_non_indexed_query.execute(db=db)
+        console.print("done")
+
+        # de-index all attribute values too large to be indexed
+        console.print(
+            f"{Timestamp().to_string()} De-index any legacy attribute data that is too large to be indexed...", end=""
+        )
+        de_index_large_attribute_values_query = await DeIndexLargeAttributeValuesQuery.init(
+            db=db, max_value_size=MAX_STRING_LENGTH
+        )
+        await de_index_large_attribute_values_query.execute(db=db)
+        console.print("done")
+
+        # add the index back to the AttributeValueNonIndexed vertex
+        console.print(f"{Timestamp().to_string()} Add the index back to the AttributeValueIndexed label...", end="")
+        index_manager = IndexManagerNeo4j(db=db)
+        index_manager.init(nodes=[AV_INDEXED_INDEX], rels=[])
+        await index_manager.add()
+        console.print("done")
+
+        return result
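Note: the intended net effect of Migration036 is that every AttributeValue vertex also carries the AttributeValueIndexed label and is covered by the attr_value_indexed range index on value, while values belonging to large attribute kinds (JSON, List, TextArea) and values longer than MAX_STRING_LENGTH keep only the plain AttributeValue label. A quick post-migration spot check can be run with the plain neo4j Python driver, as in the sketch below; the Bolt URI, the credentials, and the 4095-character cutoff are placeholders for illustration, not values taken from the package.

    # Illustrative spot check only -- not part of the package. Counts values that exceed
    # the cutoff yet still carry the AttributeValueIndexed label (expected result: 0).
    from neo4j import GraphDatabase

    MAX_VALUE_SIZE = 4095  # placeholder; the migration itself uses Infrahub's MAX_STRING_LENGTH

    CHECK_QUERY = """
    MATCH (av:AttributeValueIndexed)
    WHERE size(toString(av.value)) > $max_value_size
    RETURN count(av) AS oversized_indexed
    """

    with GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "password")) as driver:
        with driver.session() as session:
            record = session.run(CHECK_QUERY, max_value_size=MAX_VALUE_SIZE).single()
            print(f"indexed attribute values above the cutoff: {record['oversized_indexed']}")

Keeping the range index on a dedicated AttributeValueIndexed label, rather than on AttributeValue itself, is what allows large or oversized values to stay out of the index while remaining ordinary AttributeValue vertices.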
{infrahub_server-1.4.0b0.dist-info → infrahub_server-1.4.0b1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: infrahub-server
-Version: 1.4.0b0
+Version: 1.4.0b1
 Summary: Infrahub is taking a new approach to Infrastructure Management by providing a new generation of datastore to organize and control all the data that defines how an infrastructure should run.
 License: Apache-2.0
 Author: OpsMill
@@ -19,6 +19,7 @@ Requires-Dist: asgi-correlation-id (==4.2.0)
 Requires-Dist: authlib (==1.3.2)
 Requires-Dist: bcrypt (>=4.1,<4.2)
 Requires-Dist: boto3 (==1.34.129)
+Requires-Dist: copier (>=9.8.0,<10.0.0)
 Requires-Dist: dulwich (>=0.22.7,<0.23.0)
 Requires-Dist: email-validator (>=2.1,<2.2)
 Requires-Dist: fast-depends (>=2.4.12,<3.0.0)
{infrahub_server-1.4.0b0.dist-info → infrahub_server-1.4.0b1.dist-info}/RECORD
CHANGED
@@ -137,7 +137,7 @@ infrahub/core/diff/repository/deserializer.py,sha256=bhN9ao8HxqKyRz273QGLNV9z9_S
 infrahub/core/diff/repository/repository.py,sha256=u0QTMY1e2dknG_DuRAwzFt-Lp1_mdj5lqF2ymt77k9E,25581
 infrahub/core/diff/tasks.py,sha256=jSXlenTJ5Fc189Xvm971e3-gBDRnfN19cxNaWvEFwAE,3306
 infrahub/core/enums.py,sha256=qGbhRVoH43Xi0iDkUfWdQiKapJbLT9UKsCobFk_paIk,491
-infrahub/core/graph/__init__.py,sha256=
+infrahub/core/graph/__init__.py,sha256=tsz9YAARJcOVdEpuPKe6HoQi-uEbOAckrhbJQsa8PwM,19
 infrahub/core/graph/constraints.py,sha256=lmuzrKDFoeSKRiLtycB9PXi6zhMYghczKrPYvfWyy90,10396
 infrahub/core/graph/index.py,sha256=A9jzEE_wldBJsEsflODeMt4GM8sPmmbHAJRNdFioR1k,1736
 infrahub/core/graph/schema.py,sha256=o50Jcy6GBRk55RkDJSMIDDwHhLD7y_RWOirI9rCex4A,10776
@@ -156,7 +156,7 @@ infrahub/core/ipam/utilization.py,sha256=d-zpXCaWsHgJxBLopCDd7y4sJYvHcIzzpYhbTMI
 infrahub/core/manager.py,sha256=NaUuSY7Veesa67epQRuQ2TJD0-ooUSnvNRIUZCntV3g,47576
 infrahub/core/merge.py,sha256=TNZpxjNYcl3dnvE8eYXaWSXFDYeEa8DDsS9XbR2XKlA,11217
 infrahub/core/migrations/__init__.py,sha256=syPb3-Irf11dXCHgbT0UdmTnEBbpf4wXJ3m8ADYXDpk,1175
-infrahub/core/migrations/graph/__init__.py,sha256=
+infrahub/core/migrations/graph/__init__.py,sha256=q43iviiqSx7Aiml46u-zlxFYTTpNoCySfra5A30P3NU,4021
 infrahub/core/migrations/graph/m001_add_version_to_graph.py,sha256=YcLN6cFjE6IGheXR4Ujb6CcyY8bJ7WE289hcKJaENOc,1515
 infrahub/core/migrations/graph/m002_attribute_is_default.py,sha256=wB6f2N_ChTvGajqHD-OWCG5ahRMDhhXZuwo79ieq_II,1036
 infrahub/core/migrations/graph/m003_relationship_parent_optional.py,sha256=Aya-s98XfE9C7YluOwEjilwgnjaBnZxp27w_Xdv_NmU,2330
@@ -192,6 +192,7 @@ infrahub/core/migrations/graph/m032_cleanup_orphaned_branch_relationships.py,sha
 infrahub/core/migrations/graph/m033_deduplicate_relationship_vertices.py,sha256=EHsNyYEPYzqMybgrMefvE9tw-WUWmnh9ZF8FMVRl2wQ,3735
 infrahub/core/migrations/graph/m034_find_orphaned_schema_fields.py,sha256=FekohfsamyLNzGBeRBiZML94tz2fUcvTzttfv6mD1cw,3547
 infrahub/core/migrations/graph/m035_drop_attr_value_index.py,sha256=iAWLeKH6F1Zs4lS_3BmLI8K54xCriSXOrwdpNdNa7P0,1439
+infrahub/core/migrations/graph/m036_index_attr_vals.py,sha256=PGbCYU3gqveOI1jl7T18DLTYRuLedbKysaP0-3k13k0,22730
 infrahub/core/migrations/query/__init__.py,sha256=JoWOUWlV6IzwxWxObsfCnAAKUOHJkE7dZlOsfB64ZEo,876
 infrahub/core/migrations/query/attribute_add.py,sha256=oitzB-PPAclfyNtcwCWJY3RdI5Zi4oEnR62BDzn1UQk,4835
 infrahub/core/migrations/query/attribute_rename.py,sha256=onb9Nanht1Tz47JgneAcFsuhqqvPS6dvI2nNjRupLLo,6892
@@ -821,8 +822,8 @@ infrahub_testcontainers/models.py,sha256=ASYyvl7d_WQz_i7y8-3iab9hwwmCl3OCJavqVbe
 infrahub_testcontainers/performance_test.py,sha256=hvwiy6tc_lWniYqGkqfOXVGAmA_IV15VOZqbiD9ezno,6149
 infrahub_testcontainers/plugin.py,sha256=I3RuZQ0dARyKHuqCf0y1Yj731P2Mwf3BJUehRJKeWrs,5645
 infrahub_testcontainers/prometheus.yml,sha256=610xQEyj3xuVJMzPkC4m1fRnCrjGpiRBrXA2ytCLa54,599
-infrahub_server-1.4.
-infrahub_server-1.4.
-infrahub_server-1.4.
-infrahub_server-1.4.
-infrahub_server-1.4.
+infrahub_server-1.4.0b1.dist-info/LICENSE.txt,sha256=7GQO7kxVoQYnZtFrjZBKLRXbrGwwwimHPPOJtqXsozQ,11340
+infrahub_server-1.4.0b1.dist-info/METADATA,sha256=z_kgqH-ShqI6ud1WaOWBF4ZSkDYdouIw-ZuAKyiRB2I,8278
+infrahub_server-1.4.0b1.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+infrahub_server-1.4.0b1.dist-info/entry_points.txt,sha256=UXIeFWDsrV-4IllNvUEd6KieYGzQfn9paga2YyABOQI,393
+infrahub_server-1.4.0b1.dist-info/RECORD,,

{infrahub_server-1.4.0b0.dist-info → infrahub_server-1.4.0b1.dist-info}/LICENSE.txt
File without changes
{infrahub_server-1.4.0b0.dist-info → infrahub_server-1.4.0b1.dist-info}/WHEEL
File without changes
{infrahub_server-1.4.0b0.dist-info → infrahub_server-1.4.0b1.dist-info}/entry_points.txt
File without changes