infrahub-server 1.4.11__py3-none-any.whl → 1.4.13__py3-none-any.whl

This diff represents the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
infrahub/cli/tasks.py CHANGED
@@ -3,9 +3,11 @@ import logging
  import typer
  from infrahub_sdk.async_typer import AsyncTyper
  from prefect.client.orchestration import get_client
+ from prefect.client.schemas.objects import StateType

  from infrahub import config
  from infrahub.services.adapters.workflow.worker import WorkflowWorkerExecution
+ from infrahub.task_manager.task import PrefectTask
  from infrahub.tasks.dummy import DUMMY_FLOW, DummyInput
  from infrahub.workflows.initialization import setup_task_manager
  from infrahub.workflows.models import WorkerPoolDefinition
@@ -50,3 +52,47 @@ async def execute(
          workflow=DUMMY_FLOW, parameters={"data": DummyInput(firstname="John", lastname="Doe")}
      )  # type: ignore[var-annotated]
      print(result)
+
+
+ flush_app = AsyncTyper()
+
+ app.add_typer(flush_app, name="flush")
+
+
+ @flush_app.command()
+ async def flow_runs(
+     ctx: typer.Context,  # noqa: ARG001
+     config_file: str = typer.Argument("infrahub.toml", envvar="INFRAHUB_CONFIG"),
+     days_to_keep: int = 30,
+     batch_size: int = 100,
+ ) -> None:
+     """Flush old task runs"""
+     logging.getLogger("infrahub").setLevel(logging.WARNING)
+     logging.getLogger("neo4j").setLevel(logging.ERROR)
+     logging.getLogger("prefect").setLevel(logging.ERROR)
+
+     config.load_and_exit(config_file_name=config_file)
+
+     await PrefectTask.delete_flow_runs(
+         days_to_keep=days_to_keep,
+         batch_size=batch_size,
+     )
+
+
+ @flush_app.command()
+ async def stale_runs(
+     ctx: typer.Context,  # noqa: ARG001
+     config_file: str = typer.Argument("infrahub.toml", envvar="INFRAHUB_CONFIG"),
+     days_to_keep: int = 2,
+     batch_size: int = 100,
+ ) -> None:
+     """Flush stale task runs"""
+     logging.getLogger("infrahub").setLevel(logging.WARNING)
+     logging.getLogger("neo4j").setLevel(logging.ERROR)
+     logging.getLogger("prefect").setLevel(logging.ERROR)
+
+     config.load_and_exit(config_file_name=config_file)
+
+     await PrefectTask.delete_flow_runs(
+         states=[StateType.RUNNING], delete=False, days_to_keep=days_to_keep, batch_size=batch_size
+     )
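
The two commands above register under a new flush sub-app (app.add_typer(flush_app, name="flush")), so Typer should expose them as "flush flow-runs" and "flush stale-runs" (Typer hyphenates snake_case command names by default). A minimal sketch of driving the same cleanup programmatically, assuming PrefectTask.delete_flow_runs keeps the signature used above and an infrahub.toml is available:

import asyncio

from prefect.client.schemas.objects import StateType

from infrahub import config
from infrahub.task_manager.task import PrefectTask


async def nightly_cleanup() -> None:
    # same config bootstrap the CLI commands perform
    config.load_and_exit(config_file_name="infrahub.toml")
    # mirror `flush flow-runs`: drop runs older than 30 days, 100 per batch
    await PrefectTask.delete_flow_runs(days_to_keep=30, batch_size=100)
    # mirror `flush stale-runs`: target RUNNING runs older than 2 days;
    # delete=False follows the command above (exact semantics live in PrefectTask)
    await PrefectTask.delete_flow_runs(
        states=[StateType.RUNNING], delete=False, days_to_keep=2, batch_size=100
    )


asyncio.run(nightly_cleanup())
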
@@ -141,8 +141,8 @@ class DiffCalculator:
              to_time=to_time,
              previous_node_field_specifiers=previous_node_specifiers,
          )
-         node_limit = int(config.SETTINGS.database.query_size_limit / 10)
-         fields_limit = int(config.SETTINGS.database.query_size_limit / 3)
+         node_limit = max(int(config.SETTINGS.database.query_size_limit / 10), 1)
+         fields_limit = max(int(config.SETTINGS.database.query_size_limit / 3), 1)
          properties_limit = config.SETTINGS.database.query_size_limit

          calculation_request = DiffCalculationRequest(
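
The max(..., 1) guard matters because query_size_limit is configurable: for any value below 10, the old integer division truncated the node limit to 0, i.e. a LIMIT 0 that silently returns nothing. A quick illustration:

# illustration only: a small configured limit used to truncate to zero
query_size_limit = 5

old_node_limit = int(query_size_limit / 10)          # 0 -> LIMIT 0, no rows
new_node_limit = max(int(query_size_limit / 10), 1)  # floor of 1 keeps the query usable

assert old_node_limit == 0
assert new_node_limit == 1
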
@@ -20,10 +20,14 @@ class EnrichedDiffDeleteQuery(Query):
          diff_filter = "WHERE d_root.uuid IN $diff_root_uuids"

          query = """
-         MATCH (d_root:DiffRoot)
-         %(diff_filter)s
-         OPTIONAL MATCH (d_root)-[*]->(diff_thing)
-         DETACH DELETE diff_thing
-         DETACH DELETE d_root
+         MATCH (d_root:DiffRoot)
+         %(diff_filter)s
+         OPTIONAL MATCH (d_root)-[*]->(diff_thing)
+         WITH DISTINCT d_root, diff_thing
+         ORDER BY elementId(diff_thing)
+         CALL (diff_thing) {
+             DETACH DELETE diff_thing
+         } IN TRANSACTIONS
+         DETACH DELETE d_root
          """ % {"diff_filter": diff_filter}
          self.add_to_query(query=query)
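
The rewrite batches the deletes: CALL { ... } IN TRANSACTIONS commits the inner DETACH DELETE in chunks (1000 rows per inner transaction by default) instead of holding the entire diff tree in a single transaction, and ORDER BY elementId(diff_thing) gives the batches a stable order. Note that the variable-scope CALL (diff_thing) { ... } form requires a recent Neo4j 5 release, and IN TRANSACTIONS only runs inside an implicit (auto-commit) transaction. A minimal sketch with the plain neo4j driver (connection details hypothetical):

from neo4j import GraphDatabase

driver = GraphDatabase.driver("neo4j://localhost:7687", auth=("neo4j", "secret"))

with driver.session() as session:
    # session.run() issues an auto-commit transaction, which is required for
    # CALL { ... } IN TRANSACTIONS; a managed transaction function
    # (execute_write) would reject this query.
    session.run(
        """
        MATCH (d_root:DiffRoot) WHERE d_root.uuid IN $diff_root_uuids
        OPTIONAL MATCH (d_root)-[*]->(diff_thing)
        WITH DISTINCT d_root, diff_thing
        CALL (diff_thing) { DETACH DELETE diff_thing } IN TRANSACTIONS
        DETACH DELETE d_root
        """,
        diff_root_uuids=["00000000-0000-0000-0000-000000000000"],
    ).consume()
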
@@ -49,11 +49,18 @@ WITH node_diff_map, is_node_kind_migration, CASE
      WHEN is_node_kind_migration THEN $migrated_kinds_id_map[node_diff_map.uuid]
      ELSE NULL
  END AS node_db_id
+
+ // ------------------------------
+ // find the correct Node if the Node had its kind/inheritance migrated
+ // and there are multiple Nodes with the same UUID
+ // ------------------------------
  CALL (node_diff_map, node_db_id) {
-     MATCH (n:Node {uuid: node_diff_map.uuid})
-     WHERE node_db_id IS NULL
-     OR %(id_func)s(n) = node_db_id
+     MATCH (n:Node {uuid: node_diff_map.uuid})-[n_is_part_of:IS_PART_OF]->(:Root)
+     WHERE node_db_id IS NULL OR %(id_func)s(n) = node_db_id
+     AND n_is_part_of.branch IN [$source_branch, $target_branch]
      RETURN n
+     ORDER BY n_is_part_of.branch_level DESC, n_is_part_of.from DESC, n_is_part_of.status ASC
+     LIMIT 1
  }
  WITH n, node_diff_map, is_node_kind_migration
  CALL (n, node_diff_map, is_node_kind_migration) {
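
The added ORDER BY ... LIMIT 1 makes the lookup deterministic when a kind/inheritance migration has left several nodes with the same UUID: prefer the deepest branch (branch_level DESC), then the most recent edge (from DESC), with status ASC sorting "active" before "deleted". The same tie-break in plain Python (sample data hypothetical):

# hypothetical IS_PART_OF edges for nodes sharing one UUID
candidates = [
    {"node": "n-main", "branch_level": 1, "from": 100, "status": "active"},
    {"node": "n-branch", "branch_level": 2, "from": 90, "status": "active"},
    {"node": "n-deleted", "branch_level": 2, "from": 90, "status": "deleted"},
]

best = min(
    candidates,
    # branch_level DESC, from DESC, status ASC ("active" < "deleted")
    key=lambda c: (-c["branch_level"], -c["from"], c["status"]),
)
assert best["node"] == "n-branch"
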
@@ -224,16 +231,28 @@ CALL (n, node_diff_map, is_node_kind_migration) {
      ELSE NULL
  END AS rel_peer_db_id
  // ------------------------------
+ // find the correct relationship peer if the peer had its kind/inheritance migrated
+ // and there are multiple Nodes with the same UUID
+ // ------------------------------
+ CALL (rel_peer_id, rel_peer_db_id) {
+     MATCH (rel_peer:Node {uuid: rel_peer_id})-[target_is_part_of:IS_PART_OF]->(:Root)
+     WHERE (rel_peer_db_id IS NULL OR %(id_func)s(rel_peer) = rel_peer_db_id)
+     AND target_is_part_of.branch IN [$source_branch, $target_branch]
+     RETURN rel_peer
+     ORDER BY target_is_part_of.branch_level DESC, target_is_part_of.from DESC, target_is_part_of.status ASC
+     LIMIT 1
+ }
+ WITH rel_name, related_rel_status, rel_peer
+ // ------------------------------
  // determine the directions of each IS_RELATED
  // ------------------------------
- CALL (n, rel_name, rel_peer_id, rel_peer_db_id, related_rel_status) {
+ CALL (n, rel_name, rel_peer, related_rel_status) {
      MATCH (n)
      -[source_r_rel_1:IS_RELATED]
      -(r:Relationship {name: rel_name})
      -[source_r_rel_2:IS_RELATED]
-     -(rel_peer:Node {uuid: rel_peer_id})
-     WHERE (rel_peer_db_id IS NULL OR %(id_func)s(rel_peer) = rel_peer_db_id)
-     AND source_r_rel_1.branch IN [$source_branch, $target_branch]
+     -(rel_peer)
+     WHERE source_r_rel_1.branch IN [$source_branch, $target_branch]
      AND source_r_rel_2.branch IN [$source_branch, $target_branch]
      AND source_r_rel_1.from <= $at AND source_r_rel_1.to IS NULL
      AND source_r_rel_2.from <= $at AND source_r_rel_2.to IS NULL
@@ -251,37 +270,34 @@ CALL (n, node_diff_map, is_node_kind_migration) {
      source_r_rel_1.hierarchy AS r1_hierarchy,
      source_r_rel_2.hierarchy AS r2_hierarchy
  }
- WITH n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name, rel_peer_id, rel_peer_db_id, related_rel_status
- CALL (n, rel_name, rel_peer_id, rel_peer_db_id, related_rel_status) {
+ WITH n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name, rel_peer, related_rel_status
+ CALL (n, rel_name, rel_peer, related_rel_status) {
      OPTIONAL MATCH (n)
      -[target_r_rel_1:IS_RELATED {branch: $target_branch, status: "active"}]
      -(:Relationship {name: rel_name})
      -[target_r_rel_2:IS_RELATED {branch: $target_branch, status: "active"}]
-     -(rel_peer:Node {uuid: rel_peer_id})
+     -(rel_peer)
      WHERE related_rel_status = "deleted"
-     AND (rel_peer_db_id IS NULL OR %(id_func)s(rel_peer) = rel_peer_db_id)
      AND target_r_rel_1.from <= $at AND target_r_rel_1.to IS NULL
      AND target_r_rel_2.from <= $at AND target_r_rel_2.to IS NULL
      SET target_r_rel_1.to = $at
      SET target_r_rel_2.to = $at
  }
- WITH n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name, rel_peer_id, rel_peer_db_id, related_rel_status
+ WITH n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name, rel_peer, related_rel_status
  // ------------------------------
  // conditionally create new IS_RELATED relationships on target_branch, if necessary
  // ------------------------------
- CALL (n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name, rel_peer_id, rel_peer_db_id, related_rel_status) {
-     MATCH (p:Node {uuid: rel_peer_id})
-     WHERE rel_peer_db_id IS NULL OR %(id_func)s(p) = rel_peer_db_id
+ CALL (n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name, rel_peer, related_rel_status) {
      OPTIONAL MATCH (n)
      -[r_rel_1:IS_RELATED {branch: $target_branch, status: related_rel_status}]
      -(:Relationship {name: rel_name})
      -[r_rel_2:IS_RELATED {branch: $target_branch, status: related_rel_status}]
-     -(p)
+     -(rel_peer)
      WHERE r_rel_1.from <= $at
      AND (r_rel_1.to >= $at OR r_rel_1.to IS NULL)
      AND r_rel_2.from <= $at
      AND (r_rel_2.to >= $at OR r_rel_2.to IS NULL)
-     WITH p, r_rel_1, r_rel_2
+     WITH rel_peer, r_rel_1, r_rel_2
      WHERE r_rel_1 IS NULL
      AND r_rel_2 IS NULL
      // ------------------------------
@@ -301,19 +317,19 @@ CALL (n, node_diff_map, is_node_kind_migration) {
          <-[:IS_RELATED {branch: $target_branch, branch_level: $branch_level, from: $at, status: related_rel_status, hierarchy: r1_hierarchy}]
          -(r)
      }
-     CALL (r, p, r2_dir, r2_hierarchy, related_rel_status) {
-         WITH r, p, r2_dir, r2_hierarchy, related_rel_status
+     CALL (r, rel_peer, r2_dir, r2_hierarchy, related_rel_status) {
+         WITH r, rel_peer, r2_dir, r2_hierarchy, related_rel_status
          WHERE r2_dir = "r"
          CREATE (r)
          -[:IS_RELATED {branch: $target_branch, branch_level: $branch_level, from: $at, status: related_rel_status, hierarchy: r2_hierarchy}]
-         ->(p)
+         ->(rel_peer)
      }
-     CALL (r, p, r2_dir, r2_hierarchy, related_rel_status) {
-         WITH r, p, r2_dir, r2_hierarchy, related_rel_status
+     CALL (r, rel_peer, r2_dir, r2_hierarchy, related_rel_status) {
+         WITH r, rel_peer, r2_dir, r2_hierarchy, related_rel_status
          WHERE r2_dir = "l"
          CREATE (r)
          <-[:IS_RELATED {branch: $target_branch, branch_level: $branch_level, from: $at, status: related_rel_status, hierarchy: r2_hierarchy}]
-         -(p)
+         -(rel_peer)
      }
  }
  }
@@ -1 +1 @@
- GRAPH_VERSION = 39
+ GRAPH_VERSION = 40
@@ -41,6 +41,8 @@ from .m036_drop_attr_value_index import Migration036
  from .m037_index_attr_vals import Migration037
  from .m038_redo_0000_prefix_fix import Migration038
  from .m039_ipam_reconcile import Migration039
+ from .m040_duplicated_attributes import Migration040
+ from .m041_deleted_dup_edges import Migration041

  if TYPE_CHECKING:
      from infrahub.core.root import Root
@@ -87,6 +89,8 @@ MIGRATIONS: list[type[GraphMigration | InternalSchemaMigration | ArbitraryMigrat
      Migration037,
      Migration038,
      Migration039,
+     Migration040,
+     Migration041,
  ]


@@ -0,0 +1,81 @@
+ from __future__ import annotations
+
+ from typing import TYPE_CHECKING, Any, Sequence
+
+ from infrahub.core.migrations.shared import MigrationResult
+ from infrahub.core.query import Query, QueryType
+
+ from ..shared import GraphMigration
+
+ if TYPE_CHECKING:
+     from infrahub.database import InfrahubDatabase
+
+
+ class DeleteDuplicatedAttributesQuery(Query):
+     name: str = "delete_duplicated_attributes"
+     type: QueryType = QueryType.WRITE
+     insert_return: bool = False
+     insert_limit: bool = False
+
+     async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:  # noqa: ARG002
+         query = """
+         // -------------
+         // get all the Nodes linked to multiple Attributes with the same name to drastically reduce the search space
+         // -------------
+         MATCH (n:Node)-[:HAS_ATTRIBUTE]->(attr:Attribute)
+         WITH DISTINCT n, attr
+         WITH n, attr.name AS attr_name, count(*) AS num_attrs
+         WHERE num_attrs > 1
+         // -------------
+         // for each Node-attr_name pair, get the possible duplicate Attributes
+         // -------------
+         MATCH (n)-[:HAS_ATTRIBUTE]->(dup_attr:Attribute {name: attr_name})
+         WITH DISTINCT n, dup_attr
+         // -------------
+         // get the branch(es) for each possible duplicate Attribute
+         // -------------
+         CALL (n, dup_attr) {
+             MATCH (n)-[r:HAS_ATTRIBUTE {status: "active"}]->(dup_attr)
+             WHERE r.to IS NULL
+             AND NOT exists((n)-[:HAS_ATTRIBUTE {status: "deleted", branch: r.branch}]->(dup_attr))
+             RETURN r.branch AS branch
+         }
+         // -------------
+         // get the latest update time for each duplicate Attribute on each branch
+         // -------------
+         CALL (dup_attr, branch) {
+             MATCH (dup_attr)-[r {branch: branch}]-()
+             RETURN max(r.from) AS latest_update
+         }
+         // -------------
+         // order the duplicate Attributes by latest update time
+         // -------------
+         WITH n, dup_attr, branch, latest_update
+         ORDER BY n, branch, dup_attr.name, latest_update DESC
+         // -------------
+         // for any Node-dup_attr_name pairs with multiple duplicate Attributes, keep the Attribute with the latest update
+         // on this branch and delete all the other edges on this branch for this Attribute
+         // -------------
+         WITH n, branch, dup_attr.name AS dup_attr_name, collect(dup_attr) AS dup_attrs_reverse_chronological
+         WHERE size(dup_attrs_reverse_chronological) > 1
+         WITH branch, tail(dup_attrs_reverse_chronological) AS dup_attrs_to_delete
+         UNWIND dup_attrs_to_delete AS dup_attr_to_delete
+         MATCH (dup_attr_to_delete)-[r {branch: branch}]-()
+         DELETE r
+         // -------------
+         // delete any orphaned Attributes
+         // -------------
+         WITH DISTINCT dup_attr_to_delete
+         WHERE NOT exists((dup_attr_to_delete)--())
+         DELETE dup_attr_to_delete
+         """
+         self.add_to_query(query)
+
+
+ class Migration040(GraphMigration):
+     name: str = "040_duplicated_attributes"
+     queries: Sequence[type[Query]] = [DeleteDuplicatedAttributesQuery]
+     minimum_version: int = 39
+
+     async def validate_migration(self, db: InfrahubDatabase) -> MigrationResult:  # noqa: ARG002
+         return MigrationResult()
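
The core of the query is the keep/delete rule: per node, branch, and attribute name, order the duplicates newest-first and keep only the head; tail(dup_attrs_reverse_chronological) selects everything else for edge deletion. A plain-Python restatement (sample data hypothetical):

# hypothetical duplicate attributes with their latest update times
dup_attrs = [("attr-a", 100), ("attr-b", 250), ("attr-c", 180)]

reverse_chronological = sorted(dup_attrs, key=lambda a: a[1], reverse=True)
keep, *dup_attrs_to_delete = reverse_chronological  # tail() of the sorted list

assert keep[0] == "attr-b"
assert [a[0] for a in dup_attrs_to_delete] == ["attr-c", "attr-a"]
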
@@ -0,0 +1,149 @@
+ from __future__ import annotations
+
+ from typing import TYPE_CHECKING, Any
+
+ from rich import print as rprint
+
+ from infrahub.core.branch import Branch
+ from infrahub.core.diff.repository.repository import DiffRepository
+ from infrahub.core.initialization import get_root_node
+ from infrahub.core.migrations.shared import MigrationResult
+ from infrahub.core.query import Query, QueryType
+ from infrahub.dependencies.registry import build_component_registry, get_component_registry
+ from infrahub.log import get_logger
+
+ from ..shared import ArbitraryMigration
+
+ if TYPE_CHECKING:
+     from infrahub.database import InfrahubDatabase
+
+ log = get_logger()
+
+
+ class DeletePosthumousEdges(Query):
+     name = "delete_posthumous_edges_query"
+     type = QueryType.WRITE
+     insert_return = False
+
+     async def query_init(self, db: InfrahubDatabase, **kwargs: Any) -> None:  # noqa: ARG002
+         query = """
+         // ------------
+         // find deleted nodes
+         // ------------
+         MATCH (n:Node)-[e:IS_PART_OF]->(:Root)
+         WHERE e.status = "deleted" OR e.to IS NOT NULL
+         WITH DISTINCT n, e.branch AS delete_branch, e.branch_level AS delete_branch_level, CASE
+             WHEN e.status = "deleted" THEN e.from
+             ELSE e.to
+         END AS delete_time
+         // ------------
+         // find the edges added to the deleted node after the delete time
+         // ------------
+         MATCH (n)-[added_e]-(peer)
+         WHERE added_e.from > delete_time
+         AND type(added_e) <> "IS_PART_OF"
+         // if the node was deleted on a branch (delete_branch_level > 1), and then updated on main/global (added_e.branch_level = 1), we can ignore it
+         AND added_e.branch_level >= delete_branch_level
+         AND (added_e.branch = delete_branch OR delete_branch_level = 1)
+         WITH DISTINCT n, delete_branch, delete_time, added_e, peer
+         // ------------
+         // get the branched_from for the branch on which the node was deleted
+         // ------------
+         CALL (added_e) {
+             MATCH (b:Branch {name: added_e.branch})
+             RETURN b.branched_from AS added_e_branched_from
+         }
+         // ------------
+         // account for the following situations, given that the edge update time is after the node delete time
+         // - deleted on main/global, updated on branch
+         //   - illegal if the delete is before branch.branched_from
+         // - deleted on branch, updated on branch
+         //   - illegal
+         // ------------
+         WITH n, delete_branch, delete_time, added_e, peer
+         WHERE delete_branch = added_e.branch
+         OR delete_time < added_e_branched_from
+         DELETE added_e
+         """
+         self.add_to_query(query)
+
+
+ class DeleteDuplicateEdgesForMigratedKindNodes(Query):
+     name = "delete_duplicate_edges_for_migrated_kind_nodes_query"
+     type = QueryType.WRITE
+     insert_return = False
+
+     async def query_init(self, db: InfrahubDatabase, **kwargs: Any) -> None:  # noqa: ARG002
+         query = """
+         // ------------
+         // get UUIDs for migrated kind/inheritance nodes
+         // ------------
+         MATCH (n:Node)
+         WITH n.uuid AS node_uuid, count(*) AS num_nodes_with_uuid
+         WHERE num_nodes_with_uuid > 1
+         CALL (node_uuid) {
+             // ------------
+             // find any Relationships for these nodes
+             // ------------
+             MATCH (n:Node {uuid: node_uuid})-[:IS_RELATED]-(rel:Relationship)
+             WITH DISTINCT rel
+             MATCH (rel)-[e]->(peer)
+             WITH
+                 type(e) AS e_type,
+                 e.branch AS e_branch,
+                 e.from AS e_from,
+                 e.to AS e_to,
+                 e.status AS e_status,
+                 e.peer AS e_peer,
+                 CASE
+                     WHEN startNode(e) = rel THEN "out" ELSE "in"
+                 END AS direction,
+                 collect(e) AS duplicate_edges
+             WHERE size(duplicate_edges) > 1
+             WITH tail(duplicate_edges) AS duplicate_edges_to_delete
+             UNWIND duplicate_edges_to_delete AS edge_to_delete
+             DELETE edge_to_delete
+         } IN TRANSACTIONS OF 500 ROWS
+         """
+         self.add_to_query(query)
+
+
+ class Migration041(ArbitraryMigration):
+     """Clean up improper merges that duplicated edges to nodes with migrated kinds
+
+     - delete all existing diffs b/c they could contain incorrect nodes linking to deleted nodes with migrated kind/inheritance
+     - delete all edges added to any nodes AFTER they were deleted on main
+     - delete any duplicate edges touching migrated kind/inheritance nodes on main
+     """
+
+     name: str = "041_deleted_dup_edges"
+     minimum_version: int = 40
+
+     async def validate_migration(self, db: InfrahubDatabase) -> MigrationResult:  # noqa: ARG002
+         result = MigrationResult()
+
+         return result
+
+     async def execute(self, db: InfrahubDatabase) -> MigrationResult:
+         root_node = await get_root_node(db=db)
+         default_branch_name = root_node.default_branch
+         default_branch = await Branch.get_by_name(db=db, name=default_branch_name)
+
+         rprint("Deleting all diffs", end="...")
+         build_component_registry()
+         component_registry = get_component_registry()
+         diff_repo = await component_registry.get_component(DiffRepository, db=db, branch=default_branch)
+         await diff_repo.delete_all_diff_roots()
+         rprint("done")
+
+         rprint("Deleting edges merged after node deleted", end="...")
+         delete_posthumous_edges_query = await DeletePosthumousEdges.init(db=db)
+         await delete_posthumous_edges_query.execute(db=db)
+         rprint("done")
+
+         rprint("Deleting duplicate edges for migrated kind/inheritance nodes", end="...")
+         delete_duplicate_edges_query = await DeleteDuplicateEdgesForMigratedKindNodes.init(db=db)
+         await delete_duplicate_edges_query.execute(db=db)
+         rprint("done")
+
+         return MigrationResult()
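
The WHERE clause at the end of DeletePosthumousEdges encodes two "illegal edge" cases. Reduced to a predicate over the timestamps involved (and ignoring the branch_level filtering the full query also applies), a sketch:

def is_posthumous(
    delete_branch: str,
    delete_time: int,
    edge_branch: str,
    edge_from: int,
    branched_from: int,
) -> bool:
    """Simplified restatement of the predicate in DeletePosthumousEdges."""
    if edge_from <= delete_time:
        return False  # the edge predates the delete; nothing to clean up
    if delete_branch == edge_branch:
        return True  # deleted and then updated on the same branch: illegal
    # deleted on main/global, updated on a branch: only illegal if the
    # delete happened before the branch diverged
    return delete_time < branched_from
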
@@ -42,6 +42,16 @@ class NodeAttributeAddMigration(AttributeSchemaMigration):
      name: str = "node.attribute.add"
      queries: Sequence[type[AttributeMigrationQuery]] = [NodeAttributeAddMigrationQuery01]  # type: ignore[assignment]

+     async def execute(
+         self,
+         db: InfrahubDatabase,
+         branch: Branch,
+         at: Timestamp | str | None = None,
+     ) -> MigrationResult:
+         if self.new_attribute_schema.inherited is True:
+             return MigrationResult()
+         return await super().execute(db=db, branch=branch, at=at)
+
      async def execute_post_queries(
          self,
          db: InfrahubDatabase,
@@ -57,14 +57,25 @@ async def extract_peer_data(

      for rel in template_peer.get_schema().relationship_names:
          rel_manager: RelationshipManager = getattr(template_peer, rel)
-         if (
-             rel_manager.schema.kind not in [RelationshipKind.COMPONENT, RelationshipKind.PARENT]
-             or rel_manager.schema.name not in obj_peer_schema.relationship_names
-         ):
+
+         if rel_manager.schema.name not in obj_peer_schema.relationship_names:
              continue

-         if list(await rel_manager.get_peers(db=db)) == [current_template.id]:
+         peers_map = await rel_manager.get_peers(db=db)
+         if rel_manager.schema.kind in [RelationshipKind.COMPONENT, RelationshipKind.PARENT] and list(
+             peers_map.keys()
+         ) == [current_template.id]:
              obj_peer_data[rel] = {"id": parent_obj.id}
+             continue
+
+         rel_peer_ids = []
+         for peer_id, peer_object in peers_map.items():
+             # deeper templates are handled in the next level of recursion
+             if peer_object.get_schema().is_template_schema:
+                 continue
+             rel_peer_ids.append({"id": peer_id})
+
+         obj_peer_data[rel] = rel_peer_ids

      return obj_peer_data

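Reduced to its inputs, the rewritten rule is: a component/parent relationship whose only peer is the template itself is re-pointed at the new parent object, while every other matching relationship copies its non-template peers (nested templates are left to the next recursion level). A toy restatement with hypothetical data:

def copy_peers(kind, peers, template_id, parent_id):
    """peers maps peer id -> is_template; kinds and ids here are hypothetical."""
    if kind in ("Component", "Parent") and list(peers) == [template_id]:
        return {"id": parent_id}  # re-point the template-only peer to the parent
    # copy every peer that is not itself a template
    return [{"id": pid} for pid, is_template in peers.items() if not is_template]

assert copy_peers("Parent", {"tmpl-1": True}, "tmpl-1", "obj-9") == {"id": "obj-9"}
assert copy_peers("Generic", {"a": False, "tmpl-2": True}, "tmpl-1", "obj-9") == [{"id": "a"}]
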
@@ -301,13 +301,15 @@ WITH p, q, diff_rel, CASE
      ELSE $from_time
  END AS row_from_time
  ORDER BY %(id_func)s(p) DESC
- SKIP $offset
- LIMIT $limit
+ SKIP toInteger($offset)
+ LIMIT toInteger($limit)
  // -------------------------------------
  // Add flag to indicate if there is more data after this
  // -------------------------------------
  WITH collect([p, q, diff_rel, row_from_time]) AS limited_results
- WITH limited_results, size(limited_results) = $limit AS has_more_data
+ // extra NULL row ensures that has_more_data is always returned, even if all results are filtered out below
+ WITH limited_results + [[NULL, NULL, NULL, NULL]] AS limited_results
+ WITH limited_results, size(limited_results) = ($limit + 1) AS has_more_data
  UNWIND limited_results AS one_result
  WITH one_result[0] AS p, one_result[1] AS q, one_result[2] AS diff_rel, one_result[3] AS row_from_time, has_more_data
  // -------------------------------------
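
The appended [NULL, NULL, NULL, NULL] row is a sentinel: previously, when every collected row was filtered out further down the query, nothing survived the UNWIND and has_more_data never reached the caller. With one guaranteed extra row, the flag always comes back, and the fullness check shifts to $limit + 1. The same trick in Python:

def page_with_sentinel(rows, offset, limit):
    """Sketch of the has_more_data sentinel used in the updated queries."""
    page = rows[offset : offset + limit]
    with_sentinel = page + [None]  # the extra "NULL row"
    has_more = len(with_sentinel) == limit + 1  # page was full, so maybe more
    return [r for r in with_sentinel if r is not None], has_more

assert page_with_sentinel(list(range(10)), 0, 4) == ([0, 1, 2, 3], True)
assert page_with_sentinel(list(range(10)), 8, 4) == ([8, 9], False)
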
@@ -470,14 +472,16 @@ AND (
  // Limit the number of paths
  // -------------------------------------
  WITH root, r_root, p, diff_rel, q
- ORDER BY r_root.from, p.uuid, q.uuid, diff_rel.branch, diff_rel.from
- SKIP $offset
- LIMIT $limit
+ ORDER BY r_root.from, p.uuid, q.uuid, q.name, diff_rel.branch, diff_rel.from
+ SKIP toInteger($offset)
+ LIMIT toInteger($limit)
  // -------------------------------------
  // Add flag to indicate if there is more data after this
  // -------------------------------------
  WITH collect([root, r_root, p, diff_rel, q]) AS limited_results
- WITH limited_results, size(limited_results) = $limit AS has_more_data
+ // extra NULL row ensures that has_more_data is always returned, even if all results are filtered out below
+ WITH limited_results + [[NULL, NULL, NULL, NULL, NULL]] AS limited_results
+ WITH limited_results, size(limited_results) = ($limit + 1) AS has_more_data
  UNWIND limited_results AS one_result
  WITH one_result[0] AS root, one_result[1] AS r_root, one_result[2] AS p, one_result[3] AS diff_rel, one_result[4] AS q, has_more_data
  // -------------------------------------
@@ -641,8 +645,28 @@ AND (
  )
  // skip paths where nodes/attrs/rels are updated after $from_time, those are handled in other queries
  AND (
-     r_root.from <= $from_time AND (r_root.to IS NULL OR r_root.branch <> diff_rel.branch OR r_root.to <= $from_time)
-     AND r_node.from <= $from_time AND (r_node.to IS NULL OR r_node.branch <> diff_rel.branch OR r_node.to <= $from_time)
+     (
+         r_root.branch = diff_rel.branch
+         AND r_root.from <= $from_time
+         AND (r_root.to IS NULL OR r_root.to >= $to_time)
+     )
+     OR (
+         r_root.branch <> diff_rel.branch
+         AND r_root.from <= $from_time
+         AND (r_root.to IS NULL OR r_root.to >= $branch_from_time)
+     )
+ )
+ AND (
+     (
+         r_node.branch = diff_rel.branch
+         AND r_node.from <= $from_time
+         AND (r_node.to IS NULL OR r_node.to >= $to_time)
+     )
+     OR (
+         r_node.branch <> diff_rel.branch
+         AND r_node.from <= $from_time
+         AND (r_node.to IS NULL OR r_node.to >= $branch_from_time)
+     )
  )
  )
  // time-based filters for new nodes
@@ -658,8 +682,27 @@ AND (
  )
  // skip paths where nodes/attrs/rels are updated after $branch_from_time, those are handled in other queries
  AND (
-     r_root.from <= $branch_from_time AND (r_root.to IS NULL OR r_root.branch <> diff_rel.branch OR r_root.to <= $branch_from_time)
-     AND r_node.from <= $branch_from_time AND (r_node.to IS NULL OR r_node.branch <> diff_rel.branch OR r_node.to <= $branch_from_time)
+     (
+         r_root.branch = diff_rel.branch
+         AND (r_root.to IS NULL OR r_root.to >= $to_time)
+     )
+     OR (
+         r_root.branch <> diff_rel.branch
+         AND r_root.from <= $branch_from_time
+         AND (r_root.to IS NULL OR r_root.to >= $branch_from_time)
+     )
+ )
+ AND (
+     (
+         r_node.branch = diff_rel.branch
+         AND r_node.from <= $branch_from_time
+         AND (r_node.to IS NULL OR r_node.to >= $to_time)
+     )
+     OR (
+         r_node.branch <> diff_rel.branch
+         AND r_node.from <= $branch_from_time
+         AND (r_node.to IS NULL OR r_node.to >= $branch_from_time)
+     )
  )
  )
  )
@@ -701,13 +744,15 @@ AND [%(id_func)s(n), type(r_node)] <> [%(id_func)s(q), type(diff_rel)]
  // -------------------------------------
  WITH diff_rel_path, r_root, n, r_node, p, diff_rel
  ORDER BY r_root.from, n.uuid, p.uuid, type(diff_rel), diff_rel.branch, diff_rel.from
- SKIP $offset
- LIMIT $limit
+ SKIP toInteger($offset)
+ LIMIT toInteger($limit)
  // -------------------------------------
  // Add flag to indicate if there is more data after this
  // -------------------------------------
  WITH collect([diff_rel_path, r_root, n, r_node, p, diff_rel]) AS limited_results
- WITH limited_results, size(limited_results) = $limit AS has_more_data
+ // extra NULL row ensures that has_more_data is always returned, even if all results are filtered out below
+ WITH limited_results + [[NULL, NULL, NULL, NULL, NULL, NULL]] AS limited_results
+ WITH limited_results, size(limited_results) = ($limit + 1) AS has_more_data
  UNWIND limited_results AS one_result
  WITH one_result[0] AS diff_rel_path, one_result[1] AS r_root, one_result[2] AS n,
      one_result[3] AS r_node, one_result[4] AS p, one_result[5] AS diff_rel, has_more_data
@@ -803,8 +848,8 @@ WHERE num_nodes_with_uuid > 1
  // -------------------------------------
  WITH node_uuid
  ORDER BY node_uuid
- SKIP $offset
- LIMIT $limit
+ SKIP toInteger($offset)
+ LIMIT toInteger($limit)
  WITH collect(node_uuid) AS node_uuids
  WITH node_uuids, size(node_uuids) = $limit AS has_more_data
  MATCH (:Root)<-[diff_rel:IS_PART_OF {branch: $branch_name}]-(n:Node)