infrahub-server 1.1.6__py3-none-any.whl → 1.1.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- infrahub/core/attribute.py +4 -1
- infrahub/core/branch/tasks.py +7 -4
- infrahub/core/diff/combiner.py +11 -7
- infrahub/core/diff/coordinator.py +49 -70
- infrahub/core/diff/data_check_synchronizer.py +86 -7
- infrahub/core/diff/enricher/aggregated.py +3 -3
- infrahub/core/diff/enricher/cardinality_one.py +6 -6
- infrahub/core/diff/enricher/hierarchy.py +17 -4
- infrahub/core/diff/enricher/labels.py +18 -3
- infrahub/core/diff/enricher/path_identifier.py +7 -8
- infrahub/core/diff/merger/merger.py +5 -3
- infrahub/core/diff/model/path.py +66 -25
- infrahub/core/diff/parent_node_adder.py +78 -0
- infrahub/core/diff/payload_builder.py +13 -2
- infrahub/core/diff/query/all_conflicts.py +5 -2
- infrahub/core/diff/query/diff_get.py +2 -1
- infrahub/core/diff/query/field_specifiers.py +2 -0
- infrahub/core/diff/query/field_summary.py +2 -1
- infrahub/core/diff/query/filters.py +12 -1
- infrahub/core/diff/query/has_conflicts_query.py +5 -2
- infrahub/core/diff/query/{drop_tracking_id.py → merge_tracking_id.py} +3 -3
- infrahub/core/diff/query/roots_metadata.py +8 -1
- infrahub/core/diff/query/save.py +230 -139
- infrahub/core/diff/query/summary_counts_enricher.py +267 -0
- infrahub/core/diff/query/time_range_query.py +2 -1
- infrahub/core/diff/query_parser.py +49 -24
- infrahub/core/diff/repository/deserializer.py +31 -27
- infrahub/core/diff/repository/repository.py +215 -41
- infrahub/core/diff/tasks.py +4 -4
- infrahub/core/graph/__init__.py +1 -1
- infrahub/core/graph/index.py +3 -0
- infrahub/core/migrations/graph/__init__.py +4 -0
- infrahub/core/migrations/graph/m019_restore_rels_to_time.py +256 -0
- infrahub/core/migrations/graph/m020_duplicate_edges.py +160 -0
- infrahub/core/migrations/query/node_duplicate.py +38 -18
- infrahub/core/migrations/schema/node_remove.py +26 -12
- infrahub/core/migrations/shared.py +10 -8
- infrahub/core/node/__init__.py +19 -9
- infrahub/core/node/constraints/grouped_uniqueness.py +25 -5
- infrahub/core/node/ipam.py +6 -1
- infrahub/core/node/permissions.py +4 -0
- infrahub/core/query/attribute.py +2 -0
- infrahub/core/query/diff.py +41 -3
- infrahub/core/query/node.py +74 -21
- infrahub/core/query/relationship.py +107 -17
- infrahub/core/query/resource_manager.py +5 -1
- infrahub/core/relationship/model.py +8 -12
- infrahub/core/schema/definitions/core.py +1 -0
- infrahub/core/utils.py +1 -0
- infrahub/core/validators/uniqueness/query.py +20 -17
- infrahub/database/__init__.py +14 -0
- infrahub/dependencies/builder/constraint/grouped/node_runner.py +0 -2
- infrahub/dependencies/builder/diff/coordinator.py +0 -2
- infrahub/dependencies/builder/diff/deserializer.py +3 -1
- infrahub/dependencies/builder/diff/enricher/hierarchy.py +3 -1
- infrahub/dependencies/builder/diff/parent_node_adder.py +8 -0
- infrahub/graphql/mutations/computed_attribute.py +3 -1
- infrahub/graphql/mutations/diff.py +41 -10
- infrahub/graphql/mutations/main.py +11 -6
- infrahub/graphql/mutations/relationship.py +29 -1
- infrahub/graphql/mutations/resource_manager.py +3 -3
- infrahub/graphql/mutations/tasks.py +6 -3
- infrahub/graphql/queries/resource_manager.py +7 -3
- infrahub/permissions/__init__.py +2 -1
- infrahub/permissions/types.py +26 -0
- infrahub_sdk/client.py +10 -2
- infrahub_sdk/config.py +3 -0
- infrahub_sdk/ctl/check.py +3 -3
- infrahub_sdk/ctl/cli_commands.py +16 -11
- infrahub_sdk/ctl/exceptions.py +0 -6
- infrahub_sdk/ctl/exporter.py +1 -1
- infrahub_sdk/ctl/generator.py +5 -5
- infrahub_sdk/ctl/importer.py +3 -2
- infrahub_sdk/ctl/menu.py +1 -1
- infrahub_sdk/ctl/object.py +1 -1
- infrahub_sdk/ctl/repository.py +23 -15
- infrahub_sdk/ctl/schema.py +2 -2
- infrahub_sdk/ctl/utils.py +4 -3
- infrahub_sdk/ctl/validate.py +2 -1
- infrahub_sdk/exceptions.py +12 -0
- infrahub_sdk/generator.py +3 -0
- infrahub_sdk/node.py +7 -4
- infrahub_sdk/testing/schemas/animal.py +9 -0
- infrahub_sdk/utils.py +11 -1
- infrahub_sdk/yaml.py +2 -3
- {infrahub_server-1.1.6.dist-info → infrahub_server-1.1.8.dist-info}/METADATA +41 -7
- {infrahub_server-1.1.6.dist-info → infrahub_server-1.1.8.dist-info}/RECORD +94 -91
- infrahub_testcontainers/container.py +12 -3
- infrahub_testcontainers/docker-compose.test.yml +22 -3
- infrahub_testcontainers/haproxy.cfg +43 -0
- infrahub_testcontainers/helpers.py +85 -1
- infrahub/core/diff/enricher/summary_counts.py +0 -105
- infrahub/dependencies/builder/diff/enricher/summary_counts.py +0 -8
- infrahub_sdk/ctl/_file.py +0 -13
- {infrahub_server-1.1.6.dist-info → infrahub_server-1.1.8.dist-info}/LICENSE.txt +0 -0
- {infrahub_server-1.1.6.dist-info → infrahub_server-1.1.8.dist-info}/WHEEL +0 -0
- {infrahub_server-1.1.6.dist-info → infrahub_server-1.1.8.dist-info}/entry_points.txt +0 -0
infrahub/core/migrations/graph/m019_restore_rels_to_time.py
@@ -0,0 +1,256 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any, Sequence
+
+from infrahub.core.migrations.shared import GraphMigration, MigrationResult
+from infrahub.log import get_logger
+
+from ...constants import GLOBAL_BRANCH_NAME, BranchSupportType
+from ...query import Query, QueryType
+
+if TYPE_CHECKING:
+    from infrahub.database import InfrahubDatabase
+
+log = get_logger()
+
+
+class FixBranchAwareEdgesQuery(Query):
+    name = "replace_global_edges"
+    type = QueryType.WRITE
+    insert_return = False
+
+    async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:
+        """
+        Between a Node and a Relationship, if Relationship.branch_support=aware, replace any global edge
+        to the branch of a non-global edge leaving out of the Relationship node. Note that there can't
+        be multiple non-global branches on these edges, as a dedicated Relationship node would exist for that.
+        """
+
+        query = """
+        MATCH (node:Node)-[global_edge:IS_RELATED {branch: $global_branch}]-(rel:Relationship)
+        WHERE rel.branch_support=$branch_aware
+        MATCH (rel)-[non_global_edge:IS_RELATED]-(node_2: Node)
+        WHERE non_global_edge.branch <> $global_branch
+        SET global_edge.branch = non_global_edge.branch
+        """
+
+        params = {
+            "global_branch": GLOBAL_BRANCH_NAME,
+            "branch_aware": BranchSupportType.AWARE.value,
+            "branch_agnostic": BranchSupportType.AGNOSTIC.value,
+        }
+
+        self.params.update(params)
+        self.add_to_query(query)
+
+
+class SetMissingToTimeQuery(Query):
+    name = "set_missing_to_time"
+    type = QueryType.WRITE
+    insert_return = False
+
+    async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:
+        """
+        If both a deleted edge and an active edge with no time exist between 2 nodes on the same branch,
+        set `to` time of active edge using `from` time of the deleted one. This would typically happen after having
+        replaced a deleted edge on global branch by correct branch with above query.
+        """
+
+        query = """
+        MATCH (node:Node)-[deleted_edge:IS_RELATED {status: "deleted"}]-(rel:Relationship)
+        MATCH (rel)-[active_edge:IS_RELATED {status: "active"}]-(node)
+        WHERE active_edge.to IS NULL AND deleted_edge.branch = active_edge.branch
+        SET active_edge.to = deleted_edge.from
+        """
+
+        self.add_to_query(query)
+
+
+class DeleteNodesRelsQuery(Query):
+    name = "delete_relationships_of_deleted_nodes"
+    type = QueryType.WRITE
+    insert_return = False
+
+    async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:
+        """
+        Some nodes may have been incorrectly deleted, typically, while these nodes edges connected to Root
+        are correctly deleted, edges connected to other `Node` through a `Relationship` node may still be active.
+        Following query correctly deletes these edges by both setting correct to time and creating corresponding deleted edge.
+        """
+
+        query = """
+        MATCH (deleted_node: Node)-[deleted_edge:IS_PART_OF {status: "deleted"}]->(:Root)
+        MATCH (deleted_node)-[:IS_RELATED]-(rel:Relationship)
+
+        // exclude nodes having been deleted through migration. find those with same uuid and exclude the one with earlier
+        // timestamp on active branch
+        WHERE NOT EXISTS {
+            MATCH (deleted_node)-[e1:IS_RELATED]-(rel)-[e2:IS_RELATED]-(other_node)
+            WITH deleted_node, other_node, MIN(e1.from) AS min_e1_from, MIN(e2.from) AS min_e2_from
+            WHERE deleted_node <> other_node AND deleted_node.uuid = other_node.uuid AND min_e1_from < min_e2_from
+        }
+
+        // Note that if an AWARE node has been deleted on a branch and relationship is AGNOSTIC, we do not "delete" this relationship
+        // right now as this aware node might exist on another branch.
+
+        // Set to time if there is an active edge:
+        // - on deleted edge branch
+        // - or on any branch and deleted node is agnostic
+        // - or deleted node is aware and rel is agnostic
+        CALL {
+            WITH rel, deleted_edge
+            OPTIONAL MATCH (rel)-[peer_active_edge {status: "active"}]-(peer_1)
+            WHERE (peer_active_edge.branch = deleted_edge.branch OR (rel.branch_support <> $branch_agnostic AND deleted_edge.branch = $global_branch))
+                AND peer_active_edge.to IS NULL
+            SET peer_active_edge.to = deleted_edge.from
+        }
+
+        // Get distinct rel nodes linked to a deleted node, with the time at which we should delete rel edges.
+        // Take the MAX time so if it does not take the deleted time of a node deleted through a duplication migration.
+        WITH DISTINCT rel,
+            deleted_edge.branch AS deleted_edge_branch,
+            deleted_edge.branch_level AS branch_level,
+            MAX(deleted_edge.from) as deleted_time,
+            deleted_node.branch_support as deleted_node_branch_support
+
+
+        // No need to check deleted edge branch because
+        // If deleted_node has different branch support type (agnostic/aware) than rel type,
+        // there might already be a deleted edge that we would not match if we filter on deleted_edge_branch.
+        // If both are aware, it still works, as we would have one Relationship node for each branch on which this relationship exists.
+        MATCH (rel)-[]-(peer_2)
+        WHERE NOT exists((rel)-[{status: "deleted"}]-(peer_2))
+
+
+        // If res is agnostic and delete node is agnostic, we should delete on global branch
+        // If rel is aware and deleted node is aware, we should use deleted edge branch
+        // If rel is aware and delete node is agnostic, we need to create deleted edges for every distinct branch on which this relationship exists.
+        WITH DISTINCT
+            CASE
+                // Branch on which `deleted` edge should be created depends on rel.branch_support.
+                WHEN rel.branch_support = $branch_agnostic
+                THEN CASE
+                    WHEN deleted_node_branch_support = $branch_agnostic THEN [$global_branch]
+                    ELSE []
+                END
+                ELSE
+                    CASE
+                        WHEN deleted_node_branch_support = $branch_agnostic
+                        THEN COLLECT {
+                            WITH rel
+                            MATCH (rel)-[active_edge {status: "active"}]-(peer_2)
+                            RETURN DISTINCT active_edge.branch
+                        }
+                        ELSE
+                            CASE
+                                // if no active edge on this branch exists it means this relationship node is dedicated for another branch
+                                WHEN exists((rel)-[{status: "active", branch: deleted_edge_branch}]-(peer_2)) THEN [deleted_edge_branch]
+                                ELSE []
+                            END
+                    END
+            END AS branches,
+            branch_level,
+            deleted_time,
+            peer_2,
+            rel
+
+        UNWIND branches as branch
+
+        // Then creates `deleted` edge.
+        // Below CALL subqueries are called once for each rel-peer_2 pair for which we want to create a deleted edge.
+        // Note that with current infrahub relationships edges design, only one of this CALL should be matched per pair.
+
+        CALL {
+            WITH rel, peer_2, branch, branch_level, deleted_time
+            MATCH (rel)-[:IS_RELATED]->(peer_2)
+            MERGE (rel)-[:IS_RELATED {status: "deleted", branch: branch, branch_level: branch_level, from: deleted_time}]->(peer_2)
+        }
+
+        CALL {
+            WITH rel, peer_2, branch, branch_level, deleted_time
+            MATCH (rel)-[:IS_PROTECTED]->(peer_2)
+            MERGE (rel)-[:IS_PROTECTED {status: "deleted", branch: branch, branch_level: branch_level, from: deleted_time}]->(peer_2)
+        }
+
+        CALL {
+            WITH rel, peer_2, branch, branch_level, deleted_time
+            MATCH (rel)-[:IS_VISIBLE]->(peer_2)
+            MERGE (rel)-[:IS_VISIBLE {status: "deleted", branch: branch, branch_level: branch_level, from: deleted_time}]->(peer_2)
+        }
+
+        CALL {
+            WITH rel, peer_2, branch, branch_level, deleted_time
+            MATCH (rel)-[:HAS_OWNER]->(peer_2)
+            MERGE (rel)-[:HAS_OWNER {status: "deleted", branch: branch, branch_level: branch_level, from: deleted_time}]->(peer_2)
+        }
+
+        CALL {
+            WITH rel, peer_2, branch, branch_level, deleted_time
+            MATCH (rel)-[:HAS_SOURCE]->(peer_2)
+            MERGE (rel)-[:HAS_SOURCE {status: "deleted", branch: branch, branch_level: branch_level, from: deleted_time}]->(peer_2)
+        }
+
+        CALL {
+            WITH rel, peer_2, branch, branch_level, deleted_time
+            MATCH (rel)<-[:IS_RELATED]-(peer_2)
+            MERGE (rel)<-[:IS_RELATED {status: "deleted", branch: branch, branch_level: branch_level, from: deleted_time}]-(peer_2)
+        }
+
+        CALL {
+            WITH rel, peer_2, branch, branch_level, deleted_time
+            MATCH (rel)<-[:IS_PROTECTED]-(peer_2)
+            MERGE (rel)<-[:IS_PROTECTED {status: "deleted", branch: branch, branch_level: branch_level, from: deleted_time}]-(peer_2)
+        }
+
+        CALL {
+            WITH rel, peer_2, branch, branch_level, deleted_time
+            MATCH (rel)<-[:IS_VISIBLE]-(peer_2)
+            MERGE (rel)<-[:IS_VISIBLE {status: "deleted", branch: branch, branch_level: branch_level, from: deleted_time}]-(peer_2)
+        }
+
+        CALL {
+            WITH rel, peer_2, branch, branch_level, deleted_time
+            MATCH (rel)<-[:HAS_OWNER]-(peer_2)
+            MERGE (rel)<-[:HAS_OWNER {status: "deleted", branch: branch, branch_level: branch_level, from: deleted_time}]-(peer_2)
+        }
+
+        CALL {
+            WITH rel, peer_2, branch, branch_level, deleted_time
+            MATCH (rel)<-[:HAS_SOURCE]-(peer_2)
+            MERGE (rel)<-[:HAS_SOURCE {status: "deleted", branch: branch, branch_level: branch_level, from: deleted_time}]-(peer_2)
+        }
+        """
+
+        params = {
+            "global_branch": GLOBAL_BRANCH_NAME,
+            "branch_aware": BranchSupportType.AWARE.value,
+            "branch_agnostic": BranchSupportType.AGNOSTIC.value,
+        }
+
+        self.params.update(params)
+        self.add_to_query(query)
+
+
+class Migration019(GraphMigration):
+    """
+    Fix corrupted state introduced by Migration012 when duplicating a CoreAccount (branch Aware)
+    being part of a CoreStandardGroup (branch Agnostic). Database is corrupted at multiple points:
+    - Old CoreAccount node <> group_member node `active` edge has no `to` time (possibly because of #5590).
+    - Old CoreAccount node <> group_member node `deleted` edge is on `$global_branch` branch instead of `main`.
+    - New CoreAccount node <> group_member node `active` edge is on `$global_branch` branch instead of `main`.
+
+    Also, users having deleted corresponding CoreStandardGroup will also have the following data corruption,
+    as deletion did not happen correctly due to above issues:
+    - Both CoreAccount <> group_member and CoreStandardGroup <> group_member edges
+      have not been deleted (ie status is `active` without `to` time and no additional `deleted` edge).
+
+    This migration fixes all above issues to have consistent edges, and fixes IFC-1204.
+    """
+
+    name: str = "019_fix_edges_state"
+    minimum_version: int = 18
+    queries: Sequence[type[Query]] = [FixBranchAwareEdgesQuery, SetMissingToTimeQuery, DeleteNodesRelsQuery]
+
+    async def validate_migration(self, db: InfrahubDatabase) -> MigrationResult:
+        result = MigrationResult()
+        return result
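
The three query classes above are composed by `Migration019` and run through the shared `GraphMigration` machinery (see the `shared.py` hunk further down). A minimal sketch of how such a migration would be driven, assuming the migration object can be instantiated without arguments and that `MigrationResult` exposes the `errors` list shown in this diff (the `run_migration_019` helper and the bare `db` handle are illustrative, not part of the package):

```python
# Illustrative sketch only, not part of the diff.
from infrahub.core.migrations.graph.m019_restore_rels_to_time import Migration019


async def run_migration_019(db) -> None:
    migration = Migration019()

    # GraphMigration.execute() runs each query in `queries` inside a transaction
    # and collects any exception messages into MigrationResult.errors.
    result = await migration.execute(db=db)
    if result.errors:
        raise RuntimeError(f"migration 019 failed: {result.errors}")

    # validate_migration() returns an empty MigrationResult for this migration.
    await migration.validate_migration(db=db)
```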
infrahub/core/migrations/graph/m020_duplicate_edges.py
@@ -0,0 +1,160 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any, Sequence
+
+from infrahub.core.constants.database import DatabaseEdgeType
+from infrahub.core.migrations.shared import GraphMigration, MigrationResult
+from infrahub.log import get_logger
+
+from ...query import Query, QueryType
+
+if TYPE_CHECKING:
+    from infrahub.database import InfrahubDatabase
+
+log = get_logger()
+
+
+class DeleteDuplicateHasValueEdgesQuery(Query):
+    name = "delete_duplicate_has_value_edges"
+    type = QueryType.WRITE
+    insert_return = False
+
+    async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:
+        query = """
+        // -------------------
+        // find Attribute nodes with multiple identical edges to AttributeValue nodes with the same value
+        // -------------------
+        MATCH (a:Attribute)-[e:HAS_VALUE]->(av:AttributeValue)
+        WITH a, e.branch AS branch, e.branch_level AS branch_level, e.status AS status, e.from AS from, e.to AS to,
+            av.value AS attr_val, av.is_default AS attr_default, COUNT(*) AS num_duplicate_edges
+        WHERE num_duplicate_edges > 1
+        // -------------------
+        // get the the one AttributeValue we want to use
+        // -------------------
+        WITH DISTINCT a, branch, branch_level, status, from, to, attr_val, attr_default
+        WITH attr_val, attr_default, collect([a, branch, branch_level, status, from, to]) AS details_list
+        CALL {
+            WITH attr_val, attr_default
+            MATCH (av:AttributeValue {value: attr_val, is_default: attr_default})
+            RETURN av AS the_one_av
+            ORDER by %(id_func)s(av) ASC
+            LIMIT 1
+        }
+        UNWIND details_list AS details_item
+        WITH attr_val, attr_default, the_one_av,
+            details_item[0] AS a, details_item[1] AS branch, details_item[2] AS branch_level,
+            details_item[3] AS status, details_item[4] AS from, details_item[5] AS to
+        // -------------------
+        // get/create the one edge to keep
+        // -------------------
+        CREATE (a)-[fresh_e:HAS_VALUE {branch: branch, branch_level: branch_level, status: status, from: from}]->(the_one_av)
+        SET fresh_e.to = to
+        WITH a, branch, status, from, to, attr_val, attr_default, %(id_func)s(fresh_e) AS e_id_to_keep
+        // -------------------
+        // get the identical edges for a given set of Attribute node, edge properties, AttributeValue.value
+        // -------------------
+        CALL {
+            // -------------------
+            // delete the duplicate edges a given set of Attribute node, edge properties, AttributeValue.value
+            // -------------------
+            WITH a, branch, status, from, to, attr_val, attr_default, e_id_to_keep
+            MATCH (a)-[e:HAS_VALUE]->(av:AttributeValue {value: attr_val, is_default: attr_default})
+            WHERE %(id_func)s(e) <> e_id_to_keep
+                AND e.branch = branch AND e.status = status AND e.from = from
+                AND (e.to = to OR (e.to IS NULL AND to IS NULL))
+            DELETE e
+        }
+        // -------------------
+        // delete any orphaned AttributeValue nodes
+        // -------------------
+        WITH NULL AS nothing
+        LIMIT 1
+        MATCH (orphaned_av:AttributeValue)
+        WHERE NOT exists((orphaned_av)-[]-())
+        DELETE orphaned_av
+        """ % {"id_func": db.get_id_function_name()}
+        self.add_to_query(query)
+
+
+class DeleteDuplicateBooleanEdgesQuery(Query):
+    name = "delete_duplicate_booleans_edges"
+    type = QueryType.WRITE
+    insert_return = False
+    edge_type: DatabaseEdgeType | None = None
+
+    async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:
+        if not self.edge_type:
+            raise RuntimeError("edge_type is required for this query")
+        query = """
+        // -------------------
+        // find Attribute nodes with multiple identical edges to Boolean nodes
+        // -------------------
+        MATCH (a:Attribute)-[e:%(edge_type)s]->(b)
+        WITH a, e.branch AS branch, e.branch_level AS branch_level, e.status AS status, e.from AS from, e.to AS to, b, COUNT(*) AS num_duplicate_edges
+        WHERE num_duplicate_edges > 1
+        // -------------------
+        // get the identical edges for a given set of Attribute node, edge properties, Boolean
+        // -------------------
+        WITH DISTINCT a, branch, branch_level, status, from, to, b
+        CREATE (a)-[fresh_e:%(edge_type)s {branch: branch, branch_level: branch_level, status: status, from: from}]->(b)
+        SET fresh_e.to = to
+        WITH a, branch, status, from, to, b, %(id_func)s(fresh_e) AS e_id_to_keep
+        CALL {
+            WITH a, branch, status, from, to, b, e_id_to_keep
+            MATCH (a)-[e:%(edge_type)s]->(b)
+            WHERE %(id_func)s(e) <> e_id_to_keep
+                AND e.branch = branch AND e.status = status AND e.from = from
+                AND (e.to = to OR (e.to IS NULL AND to IS NULL))
+            DELETE e
+        }
+        """ % {"edge_type": self.edge_type.value, "id_func": db.get_id_function_name()}
+        self.add_to_query(query)
+
+
+class DeleteDuplicateIsVisibleEdgesQuery(DeleteDuplicateBooleanEdgesQuery):
+    name = "delete_duplicate_is_visible_edges"
+    type = QueryType.WRITE
+    insert_return = False
+    edge_type = DatabaseEdgeType.IS_VISIBLE
+
+
+class DeleteDuplicateIsProtectedEdgesQuery(DeleteDuplicateBooleanEdgesQuery):
+    name = "delete_duplicate_is_protected_edges"
+    type = QueryType.WRITE
+    insert_return = False
+    edge_type = DatabaseEdgeType.IS_PROTECTED
+
+
+class Migration020(GraphMigration):
+    """
+    1. Find duplicate edges. These can be duplicated if multiple AttributeValue nodes with the same value exist b/c of concurrent
+       database updates.
+        a. (a:Attribute)-[e:HAS_VALUE]->(av:AttributeValue)
+           grouped by (a, e.branch, e.from, e.to, e.status, av.value, av.is_default) to determine the number of duplicates.
+        b. (a:Attribute)-[e:HAS_VALUE]->(b:Boolean)
+           grouped by (a, e.branch, e.from, e.status, b) to determine the number of duplicates.
+    2. For a given set of duplicate edges
+        a. delete all of the duplicate edges
+        b. merge one edge with the properties of the deleted edges
+    3. If there are any orphaned AttributeValue nodes after these changes, then delete them
+
+    This migration does not account for consolidating duplicated AttributeValue nodes because more might be created
+    in the future due to concurrent database updates. A migration to consolidate duplicated AttributeValue nodes
+    should be run when we find a way to stop duplicate AttributeValue nodes from being created
+    """
+
+    name: str = "020_delete_duplicate_edges"
+    minimum_version: int = 19
+    queries: Sequence[type[Query]] = [
+        DeleteDuplicateHasValueEdgesQuery,
+        DeleteDuplicateIsVisibleEdgesQuery,
+        DeleteDuplicateIsProtectedEdgesQuery,
+    ]
+
+    async def execute(self, db: InfrahubDatabase) -> MigrationResult:
+        # skip the transaction b/c it will run out of memory on a large database
+        return await self.do_execute(db=db)
+
+    async def validate_migration(self, db: InfrahubDatabase) -> MigrationResult:
+        result = MigrationResult()
+        return result
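
The Cypher in `DeleteDuplicateHasValueEdgesQuery` is easier to follow as a plain-Python analogue of the same idea: group edges by their identifying properties, keep (or recreate) a single edge per group, and drop the rest. The sketch below is illustrative only; the `Edge` dataclass and the in-memory representation are invented for the example and are not part of the package.

```python
# Illustrative analogue of Migration020's dedup step, not the actual implementation.
from __future__ import annotations

from collections import defaultdict
from dataclasses import dataclass


@dataclass(frozen=True)
class Edge:
    attribute_id: str
    branch: str
    branch_level: int
    status: str
    from_time: str
    to_time: str | None
    value: object  # AttributeValue.value for HAS_VALUE edges


def deduplicate(edges: list[Edge]) -> list[Edge]:
    """Keep one edge per (attribute, branch, status, from, to, value) group."""
    groups: dict[tuple, list[Edge]] = defaultdict(list)
    for edge in edges:
        key = (edge.attribute_id, edge.branch, edge.branch_level,
               edge.status, edge.from_time, edge.to_time, edge.value)
        groups[key].append(edge)
    # The Cypher version creates one fresh edge and deletes the duplicates;
    # here we simply keep the first member of each group.
    return [members[0] for members in groups.values()]
```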
infrahub/core/migrations/query/node_duplicate.py
@@ -48,19 +48,37 @@ class NodeDuplicateQuery(Query):
 
     @staticmethod
     def _render_sub_query_per_rel_type(
-        rel_name: str,
+        rel_name: str,
+        rel_type: str,
+        rel_def: FieldInfo,
     ) -> str:
         subquery = [
             f"WITH peer_node, {rel_name}, active_node, new_node",
             f"WITH peer_node, {rel_name}, active_node, new_node",
             f'WHERE type({rel_name}) = "{rel_type}"',
         ]
-        if rel_def.default.direction in [
-            subquery.append(f"
-
-
-
-
+        if rel_def.default.direction in [GraphRelDirection.OUTBOUND, GraphRelDirection.EITHER]:
+            subquery.append(f"""
+            CREATE (new_node)-[new_active_edge:{rel_type} $rel_props_new ]->(peer_node)
+            SET new_active_edge.branch = CASE WHEN {rel_name}.branch = "-global-" THEN "-global-" ELSE $branch END
+            SET new_active_edge.branch_level = CASE WHEN {rel_name}.branch = "-global-" THEN {rel_name}.branch_level ELSE $branch_level END
+            """)
+            subquery.append(f"""
+            CREATE (active_node)-[deleted_edge:{rel_type} $rel_props_prev ]->(peer_node)
+            SET deleted_edge.branch = CASE WHEN {rel_name}.branch = "-global-" THEN "-global-" ELSE $branch END
+            SET deleted_edge.branch_level = CASE WHEN {rel_name}.branch = "-global-" THEN {rel_name}.branch_level ELSE $branch_level END
+            """)
+        elif rel_def.default.direction in [GraphRelDirection.INBOUND, GraphRelDirection.EITHER]:
+            subquery.append(f"""
+            CREATE (new_node)<-[new_active_edge:{rel_type} $rel_props_new ]-(peer_node)
+            SET new_active_edge.branch = CASE WHEN {rel_name}.branch = "-global-" THEN "-global-" ELSE $branch END
+            SET new_active_edge.branch_level = CASE WHEN {rel_name}.branch = "-global-" THEN {rel_name}.branch_level ELSE $branch_level END
+            """)
+            subquery.append(f"""
+            CREATE (active_node)<-[deleted_edge:{rel_type} $rel_props_prev ]-(peer_node)
+            SET new_active_edge.branch = CASE WHEN {rel_name}.branch = "-global-" THEN "-global-" ELSE $branch END
+            SET new_active_edge.branch_level = CASE WHEN {rel_name}.branch = "-global-" THEN {rel_name}.branch_level ELSE $branch_level END
+            """)
         subquery.append("RETURN peer_node as p2")
         return "\n".join(subquery)
 
@@ -68,7 +86,9 @@ class NodeDuplicateQuery(Query):
     def _render_sub_query_out(cls) -> str:
         sub_queries_out = [
             cls._render_sub_query_per_rel_type(
-                rel_name="rel_outband",
+                rel_name="rel_outband",
+                rel_type=rel_type,
+                rel_def=rel_def,
             )
             for rel_type, rel_def in GraphNodeRelationships.model_fields.items()
         ]
@@ -79,7 +99,9 @@ class NodeDuplicateQuery(Query):
     def _render_sub_query_in(cls) -> str:
         sub_queries_in = [
             cls._render_sub_query_per_rel_type(
-                rel_name="rel_inband",
+                rel_name="rel_inband",
+                rel_type=rel_type,
+                rel_def=rel_def,
             )
             for rel_type, rel_def in GraphNodeRelationships.model_fields.items()
         ]
@@ -94,19 +116,16 @@ class NodeDuplicateQuery(Query):
         self.params["previous_node"] = self.previous_node.model_dump()
 
         self.params["current_time"] = self.at.to_string()
-        self.params["
+        self.params["branch"] = self.branch.name
+        self.params["branch_level"] = self.branch.hierarchy_level
         self.params["branch_support"] = self.new_node.branch_support
 
         self.params["rel_props_new"] = {
-            "branch": self.branch.name,
-            "branch_level": self.branch.hierarchy_level,
             "status": RelationshipStatus.ACTIVE.value,
             "from": self.at.to_string(),
         }
 
         self.params["rel_props_prev"] = {
-            "branch": self.branch.name,
-            "branch_level": self.branch.hierarchy_level,
             "status": RelationshipStatus.DELETED.value,
             "from": self.at.to_string(),
         }
@@ -141,12 +160,12 @@ class NodeDuplicateQuery(Query):
            LIMIT 1
        }
        WITH n1 as active_node, rel_outband1 as rel_outband, p1 as peer_node, new_node
-       WHERE rel_outband.status = "active"
+       WHERE rel_outband.status = "active" AND rel_outband.to IS NULL
        CALL {
            %(sub_query_out)s
        }
        WITH p2 as peer_node, rel_outband, active_node, new_node
-       FOREACH (i in CASE WHEN rel_outband.branch
+       FOREACH (i in CASE WHEN rel_outband.branch IN ["-global-", $branch] THEN [1] ELSE [] END |
            SET rel_outband.to = $current_time
        )
        WITH active_node, new_node
@@ -160,14 +179,15 @@ class NodeDuplicateQuery(Query):
            LIMIT 1
        }
        WITH n1 as active_node, rel_inband1 as rel_inband, p1 as peer_node, new_node
-       WHERE rel_inband.status = "active"
+       WHERE rel_inband.status = "active" AND rel_inband.to IS NULL
        CALL {
            %(sub_query_in)s
        }
        WITH p2 as peer_node, rel_inband, active_node, new_node
-       FOREACH (i in CASE WHEN rel_inband.branch
+       FOREACH (i in CASE WHEN rel_inband.branch IN ["-global-", $branch] THEN [1] ELSE [] END |
            SET rel_inband.to = $current_time
        )
+
        RETURN DISTINCT new_node
        """ % {
            "branch_filter": branch_filter,
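
The recurring `CASE WHEN {rel_name}.branch = "-global-" ...` expression in these hunks replaces the old behaviour of always stamping the migration branch onto new edges: an edge that lived on the global branch keeps `-global-` (and its original `branch_level`), while every other edge gets the migration's branch and hierarchy level. A rough Python equivalent of that selection, with invented function and parameter names, would be:

```python
# Rough equivalent of the CASE WHEN expressions added in node_duplicate.py and
# node_remove.py; names are illustrative, not taken from the package.
GLOBAL_BRANCH = "-global-"


def target_branch_props(existing_branch: str, existing_level: int,
                        migration_branch: str, migration_level: int) -> tuple[str, int]:
    """Return (branch, branch_level) for an edge created by the migration."""
    if existing_branch == GLOBAL_BRANCH:
        # Global edges stay global and keep their original branch_level.
        return GLOBAL_BRANCH, existing_level
    # Everything else is rewritten onto the branch the migration runs on.
    return migration_branch, migration_level
```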
infrahub/core/migrations/schema/node_remove.py
@@ -15,17 +15,28 @@ if TYPE_CHECKING:
 
 class NodeRemoveMigrationBaseQuery(MigrationQuery):
     def render_sub_query_per_rel_type(
-        self,
+        self,
+        rel_name: str,
+        rel_type: str,
+        rel_def: FieldInfo,
     ) -> str:
         subquery = [
             f"WITH peer_node, {rel_name}, active_node",
             f"WITH peer_node, {rel_name}, active_node",
             f'WHERE type({rel_name}) = "{rel_type}"',
         ]
-        if rel_def.default.direction in [
-            subquery.append(f"
-
-
+        if rel_def.default.direction in [GraphRelDirection.OUTBOUND, GraphRelDirection.EITHER]:
+            subquery.append(f"""
+            CREATE (active_node)-[edge:{rel_type} $rel_props ]->(peer_node)
+            SET edge.branch = CASE WHEN {rel_name}.branch = "-global-" THEN "-global-" ELSE $branch END
+            SET edge.branch_level = CASE WHEN {rel_name}.branch = "-global-" THEN {rel_name}.branch_level ELSE $branch_level END
+            """)
+        elif rel_def.default.direction in [GraphRelDirection.INBOUND, GraphRelDirection.EITHER]:
+            subquery.append(f"""
+            CREATE (active_node)<-[edge:{rel_type} $rel_props ]-(peer_node)
+            SET edge.branch = CASE WHEN {rel_name}.branch = "-global-" THEN "-global-" ELSE $branch END
+            SET edge.branch_level = CASE WHEN {rel_name}.branch = "-global-" THEN {rel_name}.branch_level ELSE $branch_level END
+            """)
         subquery.append("RETURN peer_node as p2")
         return "\n".join(subquery)
 
@@ -38,10 +49,10 @@ class NodeRemoveMigrationBaseQuery(MigrationQuery):
 
         self.params["current_time"] = self.at.to_string()
         self.params["branch_name"] = self.branch.name
+        self.params["branch"] = self.branch.name
+        self.params["branch_level"] = self.branch.hierarchy_level
 
         self.params["rel_props"] = {
-            "branch": self.branch.name,
-            "branch_level": self.branch.hierarchy_level,
             "status": RelationshipStatus.DELETED.value,
             "from": self.at.to_string(),
         }
@@ -99,7 +110,7 @@ class NodeRemoveMigrationQueryIn(NodeRemoveMigrationBaseQuery):
            %(sub_query)s
        }
        WITH p2 as peer_node, rel_inband, active_node
-       FOREACH (i in CASE WHEN rel_inband.branch
+       FOREACH (i in CASE WHEN rel_inband.branch IN ["-global-", $branch] THEN [1] ELSE [] END |
            SET rel_inband.to = $current_time
        )
        """ % {"sub_query": sub_query, "branch_filter": branch_filter}
@@ -108,7 +119,9 @@ class NodeRemoveMigrationQueryIn(NodeRemoveMigrationBaseQuery):
     def render_sub_query_in(self) -> str:
         sub_queries_in = [
             self.render_sub_query_per_rel_type(
-                rel_name="rel_inband",
+                rel_name="rel_inband",
+                rel_type=rel_type,
+                rel_def=rel_def,
             )
             for rel_type, rel_def in GraphNodeRelationships.model_fields.items()
         ]
@@ -142,8 +155,7 @@ class NodeRemoveMigrationQueryOut(NodeRemoveMigrationBaseQuery):
        CALL {
            %(sub_query)s
        }
-
-       FOREACH (i in CASE WHEN rel_outband.branch = $branch_name THEN [1] ELSE [] END |
+       FOREACH (i in CASE WHEN rel_outband.branch IN ["-global-", $branch] THEN [1] ELSE [] END |
            SET rel_outband.to = $current_time
        )
        """ % {"sub_query": sub_query, "branch_filter": branch_filter}
@@ -153,7 +165,9 @@ class NodeRemoveMigrationQueryOut(NodeRemoveMigrationBaseQuery):
     def render_sub_query_out(self) -> str:
         sub_queries_out = [
             self.render_sub_query_per_rel_type(
-                rel_name="rel_outband",
+                rel_name="rel_outband",
+                rel_type=rel_type,
+                rel_def=rel_def,
            )
             for rel_type, rel_def in GraphNodeRelationships.model_fields.items()
         ]
infrahub/core/migrations/shared.py
@@ -120,15 +120,17 @@ class GraphMigration(BaseModel):
 
     async def execute(self, db: InfrahubDatabase) -> MigrationResult:
         async with db.start_transaction() as ts:
-
+            return await self.do_execute(db=ts)
 
-
-
-
-
-
-
-
+    async def do_execute(self, db: InfrahubDatabase) -> MigrationResult:
+        result = MigrationResult()
+        for migration_query in self.queries:
+            try:
+                query = await migration_query.init(db=db)
+                await query.execute(db=db)
+            except Exception as exc:  # pylint: disable=broad-exception-caught
+                result.errors.append(str(exc))
+        return result
 
         return result
 
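
Splitting the query loop out into `do_execute` lets subclasses decide whether to wrap it in a transaction: the base `execute` keeps the transactional behaviour, while `Migration020` above calls `do_execute` directly to avoid holding one very large transaction. A hypothetical subclass following the same pattern might look like the sketch below; `MigrationXXX`, its `name`, and its `minimum_version` are placeholders, not classes from this release.

```python
# Sketch of the pattern enabled by the do_execute() split; MigrationXXX is hypothetical.
from typing import Sequence

from infrahub.core.migrations.shared import GraphMigration, MigrationResult
from infrahub.core.query import Query


class MigrationXXX(GraphMigration):
    name: str = "xxx_example"
    minimum_version: int = 20
    queries: Sequence[type[Query]] = []  # migration Query classes would go here

    async def execute(self, db) -> MigrationResult:
        # Bypass the base-class transaction for very large write workloads,
        # the same trade-off Migration020 makes in this release.
        return await self.do_execute(db=db)
```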