infrahub-server 1.2.9__py3-none-any.whl → 1.2.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. infrahub/computed_attribute/models.py +13 -0
  2. infrahub/computed_attribute/tasks.py +29 -28
  3. infrahub/core/attribute.py +43 -2
  4. infrahub/core/branch/models.py +8 -9
  5. infrahub/core/diff/calculator.py +61 -8
  6. infrahub/core/diff/combiner.py +37 -29
  7. infrahub/core/diff/enricher/hierarchy.py +4 -6
  8. infrahub/core/diff/merger/merger.py +29 -1
  9. infrahub/core/diff/merger/serializer.py +1 -0
  10. infrahub/core/diff/model/path.py +6 -3
  11. infrahub/core/diff/query/merge.py +264 -28
  12. infrahub/core/diff/query/save.py +6 -5
  13. infrahub/core/diff/query_parser.py +4 -15
  14. infrahub/core/diff/repository/deserializer.py +7 -6
  15. infrahub/core/graph/__init__.py +1 -1
  16. infrahub/core/migrations/graph/m028_delete_diffs.py +38 -0
  17. infrahub/core/query/diff.py +97 -13
  18. infrahub/core/query/node.py +26 -3
  19. infrahub/core/query/relationship.py +96 -35
  20. infrahub/core/relationship/model.py +1 -1
  21. infrahub/core/validators/uniqueness/query.py +7 -0
  22. infrahub/trigger/setup.py +13 -2
  23. infrahub/types.py +1 -1
  24. infrahub/webhook/models.py +2 -1
  25. infrahub/workflows/catalogue.py +9 -0
  26. infrahub_sdk/timestamp.py +2 -2
  27. {infrahub_server-1.2.9.dist-info → infrahub_server-1.2.10.dist-info}/METADATA +3 -3
  28. {infrahub_server-1.2.9.dist-info → infrahub_server-1.2.10.dist-info}/RECORD +33 -32
  29. infrahub_testcontainers/docker-compose.test.yml +2 -2
  30. infrahub_testcontainers/performance_test.py +6 -3
  31. {infrahub_server-1.2.9.dist-info → infrahub_server-1.2.10.dist-info}/LICENSE.txt +0 -0
  32. {infrahub_server-1.2.9.dist-info → infrahub_server-1.2.10.dist-info}/WHEEL +0 -0
  33. {infrahub_server-1.2.9.dist-info → infrahub_server-1.2.10.dist-info}/entry_points.txt +0 -0
infrahub/core/query/diff.py CHANGED
@@ -1,9 +1,10 @@
  from __future__ import annotations

- from typing import TYPE_CHECKING, Any
+ from dataclasses import dataclass
+ from typing import TYPE_CHECKING, Any, Generator

  from infrahub import config
- from infrahub.core.constants import GLOBAL_BRANCH_NAME, BranchSupportType
+ from infrahub.core.constants import GLOBAL_BRANCH_NAME, BranchSupportType, DiffAction, RelationshipStatus
  from infrahub.core.query import Query, QueryType
  from infrahub.core.timestamp import Timestamp

@@ -128,12 +129,13 @@ CALL {
  // add base branch paths before branched_from, if they exist
  // -------------------------------------
  WITH n, attr_rel, r_node, r_prop
+ // 'base_n' instead of 'n' here to get previous value for node with a migrated kind/inheritance
  OPTIONAL MATCH latest_base_path = (:Root)<-[base_r_root:IS_PART_OF {branch: $base_branch_name}]
- -(n)-[base_r_node {branch: $base_branch_name}]
+ -(base_n {uuid: n.uuid})-[base_r_node {branch: $base_branch_name}]
  -(attr_rel)-[base_r_prop {branch: $base_branch_name}]->(base_prop)
  WHERE type(base_r_node) = type(r_node)
  AND type(base_r_prop) = type(r_prop)
- AND [%(id_func)s(n), type(base_r_node)] <> [%(id_func)s(base_prop), type(base_r_prop)]
+ AND [%(id_func)s(base_n), type(base_r_node)] <> [%(id_func)s(base_prop), type(base_r_prop)]
  AND all(
  r in relationships(latest_base_path)
  WHERE r.from < $branch_from_time
@@ -143,7 +145,7 @@ CALL {
  // the migration leaves two nodes with the same UUID linked to the same Relationship
  // ------------------------
  AND (
- n.uuid IS NULL OR base_prop.uuid IS NULL OR n.uuid <> base_prop.uuid
+ base_n.uuid IS NULL OR base_prop.uuid IS NULL OR base_n.uuid <> base_prop.uuid
  OR type(base_r_node) <> "IS_RELATED" OR type(base_r_prop) <> "IS_RELATED"
  )
  WITH latest_base_path, base_r_root, base_r_node, base_r_prop
@@ -277,7 +279,7 @@ WITH p, q, diff_rel, CASE
  WHEN $new_node_ids_list IS NOT NULL AND p.uuid IN $new_node_ids_list THEN $branch_from_time
  ELSE $from_time
  END AS row_from_time
- ORDER BY p.uuid DESC
+ ORDER BY %(id_func)s(p) DESC
  SKIP $offset
  LIMIT $limit
  // -------------------------------------
@@ -314,15 +316,15 @@ CALL {
  AND node.branch_support IN [$branch_aware, $branch_agnostic]
  AND type(r_prop) IN ["IS_VISIBLE", "IS_PROTECTED", "HAS_SOURCE", "HAS_OWNER", "HAS_VALUE", "IS_RELATED"]
  AND any(l in labels(prop) WHERE l in ["Boolean", "Node", "AttributeValue"])
- AND ALL(
- r in [r_node, r_prop]
- WHERE r.from < $to_time AND r.branch = top_diff_rel.branch
- )
  AND (top_diff_rel.to IS NULL OR top_diff_rel.to >= r_node.from)
  AND (r_node.to IS NULL OR r_node.to >= r_prop.from)
  AND [%(id_func)s(p), type(r_node)] <> [%(id_func)s(prop), type(r_prop)]
- AND top_diff_rel.status = r_node.status
- AND top_diff_rel.status = r_prop.status
+ AND r_node.from < $to_time
+ AND r_node.branch = top_diff_rel.branch
+ AND r_node.status = top_diff_rel.status
+ AND r_prop.from < $to_time
+ AND r_prop.branch = top_diff_rel.branch
+ AND r_prop.status = top_diff_rel.status
  // ------------------------
  // special handling for nodes that had their kind updated,
  // the migration leaves two nodes with the same UUID linked to the same Relationship
@@ -717,7 +719,7 @@ CALL {
  CALL {
  WITH n, row_from_time
  OPTIONAL MATCH (root:Root)<-[r_root_deleted:IS_PART_OF {branch: $branch_name}]-(n)
- WHERE row_from_time <= r_root_deleted.from < $to_time
+ WHERE r_root_deleted.from < $to_time
  WITH r_root_deleted
  ORDER BY r_root_deleted.status DESC
  LIMIT 1
@@ -745,3 +747,85 @@ WITH n, p, type(diff_rel) AS drt, head(collect(diff_rel_path)) AS diff_path, has
  self.add_to_query(self.get_relationship_peer_side_query(db=db))
  self.add_to_query("UNWIND diff_rel_paths AS diff_path")
  self.return_labels = ["DISTINCT diff_path AS diff_path", "has_more_data"]
+
+
+ @dataclass
+ class MigratedKindNode:
+ uuid: str
+ kind: str
+ db_id: str
+ from_time: Timestamp
+ action: DiffAction
+ has_more_data: bool
+
+
+ class DiffMigratedKindNodesQuery(DiffCalculationQuery):
+ name = "diff_migrated_kind_nodes_query"
+
+ async def query_init(self, db: InfrahubDatabase, **kwargs: Any) -> None: # noqa: ARG002
+ params_dict = self.get_params()
+ self.params.update(params_dict)
+ migrated_kind_nodes_query = """
+ // -------------------------------------
+ // Identify nodes added/removed on branch in the time frame
+ // -------------------------------------
+ MATCH (:Root)<-[diff_rel:IS_PART_OF {branch: $branch_name}]-(n:Node)
+ WHERE (
+ ($from_time <= diff_rel.from < $to_time AND (diff_rel.to IS NULL OR diff_rel.to > $to_time))
+ OR ($from_time <= diff_rel.to < $to_time)
+ )
+ AND n.branch_support = $branch_aware
+ WITH DISTINCT n.uuid AS node_uuid, %(id_func)s(n) AS db_id
+ WITH node_uuid, count(*) AS num_nodes_with_uuid
+ WHERE num_nodes_with_uuid > 1
+ // -------------------------------------
+ // Limit the number of nodes
+ // -------------------------------------
+ WITH node_uuid
+ ORDER BY node_uuid
+ SKIP $offset
+ LIMIT $limit
+ WITH collect(node_uuid) AS node_uuids
+ WITH node_uuids, size(node_uuids) = $limit AS has_more_data
+ MATCH (:Root)<-[diff_rel:IS_PART_OF {branch: $branch_name}]-(n:Node)
+ WHERE n.uuid IN node_uuids
+ AND (
+ ($from_time <= diff_rel.from < $to_time AND (diff_rel.to IS NULL OR diff_rel.to > $to_time))
+ OR ($from_time <= diff_rel.to < $to_time)
+ )
+ // -------------------------------------
+ // Ignore node created and deleted on this branch
+ // -------------------------------------
+ CALL {
+ WITH n
+ OPTIONAL MATCH (:Root)<-[diff_rel:IS_PART_OF {branch: $branch_name}]-(n)
+ WITH diff_rel
+ ORDER BY diff_rel.from ASC
+ WITH collect(diff_rel.status) AS statuses
+ RETURN statuses = ["active", "deleted"] AS intra_branch_update
+ }
+ WITH n.uuid AS uuid, n.kind AS kind, %(id_func)s(n) AS db_id, diff_rel.from_time AS from_time, diff_rel.status AS status, has_more_data
+ WHERE intra_branch_update = FALSE
+ """ % {"id_func": db.get_id_function_name()}
+ self.add_to_query(query=migrated_kind_nodes_query)
+ self.return_labels = [
+ "uuid",
+ "kind",
+ "db_id",
+ "from_time",
+ "status",
+ "has_more_data",
+ ]
+
+ def get_migrated_kind_nodes(self) -> Generator[MigratedKindNode, None, None]:
+ for result in self.get_results():
+ yield MigratedKindNode(
+ uuid=result.get_as_type("uuid", return_type=str),
+ kind=result.get_as_type("kind", return_type=str),
+ db_id=result.get_as_type("db_id", return_type=str),
+ from_time=result.get_as_type("from_time", return_type=Timestamp),
+ action=DiffAction.REMOVED
+ if result.get_as_type("status", return_type=str).lower() == RelationshipStatus.DELETED.value
+ else DiffAction.ADDED,
+ has_more_data=result.get_as_type("has_more_data", bool),
+ )
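The new `DiffMigratedKindNodesQuery` exposes its results through the `get_migrated_kind_nodes()` generator shown above. A minimal usage sketch, following the `Query.init()` / `execute()` pattern visible elsewhere in this diff; the keyword arguments passed to `init()` are assumptions, not taken from the package:

```python
from infrahub.core.query.diff import DiffMigratedKindNodesQuery  # module path assumed from the file list


async def collect_migrated_kind_nodes(db, branch, diff_from, diff_to):
    # Keyword arguments below are illustrative assumptions; adjust them to the
    # real DiffCalculationQuery.init() signature.
    query = await DiffMigratedKindNodesQuery.init(db=db, branch=branch, diff_from=diff_from, diff_to=diff_to)
    await query.execute(db=db)
    # Each MigratedKindNode carries uuid, kind, db_id, from_time, action and has_more_data.
    return list(query.get_migrated_kind_nodes())
```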
infrahub/core/query/node.py CHANGED
@@ -92,7 +92,7 @@ class NodeAttributesFromDB:
  class PeerInfo:
  uuid: str
  kind: str
- labels: frozenset[str]
+ db_id: str


  class NodeQuery(Query):
@@ -413,9 +413,32 @@ class NodeDeleteQuery(NodeQuery):
  self.params["branch"] = self.branch.name
  self.params["branch_level"] = self.branch.hierarchy_level

+ if self.branch.is_global or self.branch.is_default:
+ node_query_match = """
+ MATCH (n:Node { uuid: $uuid })
+ OPTIONAL MATCH (n)-[delete_edge:IS_PART_OF {status: "deleted", branch: $branch}]->(:Root)
+ WHERE delete_edge.from <= $at
+ WITH n WHERE delete_edge IS NULL
+ """
+ else:
+ node_filter, node_filter_params = self.branch.get_query_filter_path(at=self.at, variable_name="r")
+ node_query_match = """
+ MATCH (n:Node { uuid: $uuid })
+ CALL {
+ WITH n
+ MATCH (n)-[r:IS_PART_OF]->(:Root)
+ WHERE %(node_filter)s
+ RETURN r.status = "active" AS is_active
+ ORDER BY r.from DESC
+ LIMIT 1
+ }
+ WITH n WHERE is_active = TRUE
+ """ % {"node_filter": node_filter}
+ self.params.update(node_filter_params)
+ self.add_to_query(node_query_match)
+
  query = """
  MATCH (root:Root)
- MATCH (n:Node { uuid: $uuid })
  CREATE (n)-[r:IS_PART_OF { branch: $branch, branch_level: $branch_level, status: "deleted", from: $at }]->(root)
  """

@@ -1554,5 +1577,5 @@ class NodeGetHierarchyQuery(Query):
  yield PeerInfo(
  uuid=peer_node.get("uuid"),
  kind=peer_node.get("kind"),
- labels=peer_node.labels,
+ db_id=peer_node.element_id,
  )
infrahub/core/query/relationship.py CHANGED
@@ -73,30 +73,27 @@ class RelationshipPeerData:
  source_id: UUID
  """UUID of the Source Node."""

+ source_db_id: str
+ """Internal DB ID of the Source Node."""
+
  source_kind: str
  """Kind of the Source Node."""

- source_labels: frozenset[str]
- """Labels of the Source Node."""
-
  peer_id: UUID
  """UUID of the Peer Node."""

+ peer_db_id: str
+ """Internal DB ID of the Peer Node."""
+
  peer_kind: str
  """Kind of the Peer Node."""

- peer_labels: frozenset[str]
- """Labels of the Peer Node."""
-
  properties: dict[str, FlagPropertyData | NodePropertyData]
  """UUID of the Relationship Node."""

  rel_node_id: UUID | None = None
  """UUID of the Relationship Node."""

- peer_db_id: str | None = None
- """Internal DB ID of the Peer Node."""
-
  rel_node_db_id: str | None = None
  """Internal DB ID of the Relationship Node."""

@@ -205,6 +202,63 @@ class RelationshipQuery(Query):
  rel_prop_dict["hierarchy"] = self.schema.hierarchical
  return rel_prop_dict

+ def add_source_match_to_query(self, source_branch: Branch) -> None:
+ self.params["source_id"] = self.source_id or self.source.get_id()
+ if source_branch.is_global or source_branch.is_default:
+ source_query_match = """
+ MATCH (s:Node { uuid: $source_id })
+ OPTIONAL MATCH (s)-[delete_edge:IS_PART_OF {status: "deleted", branch: $source_branch}]->(:Root)
+ WHERE delete_edge.from <= $at
+ WITH *, s WHERE delete_edge IS NULL
+ """
+ self.params["source_branch"] = source_branch.name
+ source_filter, source_filter_params = source_branch.get_query_filter_path(
+ at=self.at, variable_name="r", params_prefix="src_"
+ )
+ source_query_match = """
+ MATCH (s:Node { uuid: $source_id })
+ CALL {
+ WITH s
+ MATCH (s)-[r:IS_PART_OF]->(:Root)
+ WHERE %(source_filter)s
+ RETURN r.status = "active" AS s_is_active
+ ORDER BY r.from DESC
+ LIMIT 1
+ }
+ WITH *, s WHERE s_is_active = TRUE
+ """ % {"source_filter": source_filter}
+ self.params.update(source_filter_params)
+ self.add_to_query(source_query_match)
+
+ def add_dest_match_to_query(self, destination_branch: Branch, destination_id: str) -> None:
+ self.params["destination_id"] = destination_id
+ if destination_branch.is_global or destination_branch.is_default:
+ destination_query_match = """
+ MATCH (d:Node { uuid: $destination_id })
+ OPTIONAL MATCH (d)-[delete_edge:IS_PART_OF {status: "deleted", branch: $destination_branch}]->(:Root)
+ WHERE delete_edge.from <= $at
+ WITH *, d WHERE delete_edge IS NULL
+ """
+ self.params["destination_branch"] = destination_branch.name
+ else:
+ destination_filter, destination_filter_params = destination_branch.get_query_filter_path(
+ at=self.at, variable_name="r", params_prefix="dst_"
+ )
+ destination_query_match = """
+ MATCH (d:Node { uuid: $destination_id })
+ CALL {
+ WITH d
+ MATCH (d)-[r:IS_PART_OF]->(:Root)
+ WHERE %(destination_filter)s
+ RETURN r.status = "active" AS d_is_active
+ ORDER BY r.from DESC
+ LIMIT 1
+ }
+ WITH *, d WHERE d_is_active = TRUE
+ """ % {"destination_filter": destination_filter}
+ self.params.update(destination_filter_params)
+ self.add_to_query(destination_query_match)
+

  class RelationshipCreateQuery(RelationshipQuery):
  name = "relationship_create"
@@ -223,8 +277,6 @@ class RelationshipCreateQuery(RelationshipQuery):
  super().__init__(destination=destination, destination_id=destination_id, **kwargs)

  async def query_init(self, db: InfrahubDatabase, **kwargs) -> None: # noqa: ARG002
- self.params["source_id"] = self.source_id
- self.params["destination_id"] = self.destination_id
  self.params["name"] = self.schema.identifier
  self.params["branch_support"] = self.schema.branch.value

@@ -237,12 +289,11 @@ class RelationshipCreateQuery(RelationshipQuery):
  self.params["is_protected"] = self.rel.is_protected
  self.params["is_visible"] = self.rel.is_visible

- query_match = """
- MATCH (s:Node { uuid: $source_id })
- MATCH (d:Node { uuid: $destination_id })
- """
- self.add_to_query(query_match)
-
+ self.add_source_match_to_query(source_branch=self.source.get_branch_based_on_support_type())
+ self.add_dest_match_to_query(
+ destination_branch=self.destination.get_branch_based_on_support_type(),
+ destination_id=self.destination_id or self.destination.get_id(),
+ )
  self.query_add_all_node_property_match()

  self.params["rel_prop"] = self.get_relationship_properties_dict(status=RelationshipStatus.ACTIVE)
@@ -387,7 +438,6 @@ class RelationshipDataDeleteQuery(RelationshipQuery):

  async def query_init(self, db: InfrahubDatabase, **kwargs) -> None: # noqa: ARG002
  self.params["source_id"] = self.source_id
- self.params["destination_id"] = self.data.peer_id
  self.params["rel_node_id"] = self.data.rel_node_id
  self.params["name"] = self.schema.identifier
  self.params["branch"] = self.branch.name
@@ -397,9 +447,10 @@ class RelationshipDataDeleteQuery(RelationshipQuery):
  # -----------------------------------------------------------------------
  # Match all nodes, including properties
  # -----------------------------------------------------------------------
+
+ self.add_source_match_to_query(source_branch=self.source.get_branch_based_on_support_type())
+ self.add_dest_match_to_query(destination_branch=self.branch, destination_id=self.data.peer_id)
  query = """
- MATCH (s:Node { uuid: $source_id })
- MATCH (d:Node { uuid: $destination_id })
  MATCH (rl:Relationship { uuid: $rel_node_id })
  """
  self.add_to_query(query)
@@ -451,8 +502,6 @@ class RelationshipDeleteQuery(RelationshipQuery):

  async def query_init(self, db: InfrahubDatabase, **kwargs) -> None: # noqa: ARG002
  rel_filter, rel_params = self.branch.get_query_filter_path(at=self.at, variable_name="edge")
- self.params["source_id"] = self.source_id
- self.params["destination_id"] = self.destination_id
  self.params["rel_id"] = self.rel.id
  self.params["branch"] = self.branch.name
  self.params["rel_prop"] = self.get_relationship_properties_dict(status=RelationshipStatus.DELETED)
@@ -463,9 +512,14 @@ class RelationshipDeleteQuery(RelationshipQuery):
  r1 = f"{arrows.left.start}[r1:{self.rel_type} $rel_prop ]{arrows.left.end}"
  r2 = f"{arrows.right.start}[r2:{self.rel_type} $rel_prop ]{arrows.right.end}"

+ self.add_source_match_to_query(source_branch=self.source.get_branch_based_on_support_type())
+ self.add_dest_match_to_query(
+ destination_branch=self.destination.get_branch_based_on_support_type(),
+ destination_id=self.destination_id or self.destination.get_id(),
+ )
  query = """
- MATCH (s:Node { uuid: $source_id })-[:IS_RELATED]-(rl:Relationship {uuid: $rel_id})-[:IS_RELATED]-(d:Node { uuid: $destination_id })
- WITH s, rl, d
+ MATCH (s)-[:IS_RELATED]-(rl:Relationship {uuid: $rel_id})-[:IS_RELATED]-(d)
+ WITH DISTINCT s, rl, d
  LIMIT 1
  CREATE (s)%(r1)s(rl)
  CREATE (rl)%(r2)s(d)
@@ -765,11 +819,11 @@ class RelationshipGetPeerQuery(Query):
  peer_node = result.get_node("peer")
  data = RelationshipPeerData(
  source_id=source_node.get("uuid"),
+ source_db_id=source_node.element_id,
  source_kind=source_node.get("kind"),
- source_labels=source_node.labels,
  peer_id=peer_node.get("uuid"),
+ peer_db_id=peer_node.element_id,
  peer_kind=peer_node.get("kind"),
- peer_labels=peer_node.labels,
  rel_node_db_id=result.get("rl").element_id,
  rel_node_id=result.get("rl").get("uuid"),
  updated_at=rels[0]["from"],
@@ -807,8 +861,6 @@ class RelationshipGetQuery(RelationshipQuery):
  type: QueryType = QueryType.READ

  async def query_init(self, db: InfrahubDatabase, **kwargs) -> None: # noqa: ARG002
- self.params["source_id"] = self.source_id
- self.params["destination_id"] = self.destination_id
  self.params["name"] = self.schema.identifier
  self.params["branch"] = self.branch.name

@@ -822,9 +874,12 @@ class RelationshipGetQuery(RelationshipQuery):
  r1 = f"{arrows.left.start}[r1:{self.rel.rel_type}]{arrows.left.end}"
  r2 = f"{arrows.right.start}[r2:{self.rel.rel_type}]{arrows.right.end}"

+ self.add_source_match_to_query(source_branch=self.source.get_branch_based_on_support_type())
+ self.add_dest_match_to_query(
+ destination_branch=self.destination.get_branch_based_on_support_type(),
+ destination_id=self.destination_id or self.destination.get_id(),
+ )
  query = """
- MATCH (s:Node { uuid: $source_id })
- MATCH (d:Node { uuid: $destination_id })
  MATCH (s)%s(rl:Relationship { name: $name })%s(d)
  WHERE %s
  """ % (
@@ -1051,7 +1106,11 @@ class RelationshipDeleteAllQuery(Query):
  CALL {
  WITH rl
  MATCH (rl)-[active_edge:IS_RELATED]->(n)
- WHERE %(active_rel_filter)s AND active_edge.status ="active"
+ WHERE %(active_rel_filter)s
+ WITH rl, active_edge, n
+ ORDER BY %(id_func)s(rl), %(id_func)s(n), active_edge.from DESC
+ WITH rl, n, head(collect(active_edge)) AS active_edge
+ WHERE active_edge.status = "active"
  CREATE (rl)-[deleted_edge:IS_RELATED $rel_prop]->(n)
  SET deleted_edge.hierarchy = active_edge.hierarchy
  WITH rl, active_edge, n
@@ -1067,7 +1126,11 @@ class RelationshipDeleteAllQuery(Query):

  WITH rl
  MATCH (rl)<-[active_edge:IS_RELATED]-(n)
- WHERE %(active_rel_filter)s AND active_edge.status ="active"
+ WHERE %(active_rel_filter)s
+ WITH rl, active_edge, n
+ ORDER BY %(id_func)s(rl), %(id_func)s(n), active_edge.from DESC
+ WITH rl, n, head(collect(active_edge)) AS active_edge
+ WHERE active_edge.status = "active"
  CREATE (rl)<-[deleted_edge:IS_RELATED $rel_prop]-(n)
  SET deleted_edge.hierarchy = active_edge.hierarchy
  WITH rl, active_edge, n
@@ -1080,9 +1143,7 @@ class RelationshipDeleteAllQuery(Query):
  "inbound" as rel_direction
  }
  RETURN DISTINCT uuid, kind, rel_identifier, rel_direction
- """ % {
- "active_rel_filter": active_rel_filter,
- }
+ """ % {"active_rel_filter": active_rel_filter, "id_func": db.get_id_function_name()}

  self.add_to_query(query)

infrahub/core/relationship/model.py CHANGED
@@ -416,7 +416,7 @@ class Relationship(FlagPropertyMixin, NodePropertyMixin):
  await update_relationships_to(rel_ids_to_update, to=delete_at, db=db)

  delete_query = await RelationshipDeleteQuery.init(
- db=db, rel=self, source_id=node.id, destination_id=peer.id, branch=branch, at=delete_at
+ db=db, rel=self, source=node, destination=peer, branch=branch, at=delete_at
  )
  await delete_query.execute(db=db)

infrahub/core/validators/uniqueness/query.py CHANGED
@@ -225,6 +225,13 @@ class NodeUniqueAttributeConstraintQuery(Query):
  attr_name,
  attr_value,
  relationship_identifier
+ ORDER BY
+ node_id,
+ deepest_branch_name,
+ node_count,
+ attr_name,
+ attr_value,
+ relationship_identifier
  """ % {
  "select_subqueries_str": select_subqueries_str,
  "branch_filter": branch_filter,
infrahub/trigger/setup.py CHANGED
@@ -39,9 +39,9 @@ async def setup_triggers(
  report = TriggerSetupReport()

  if trigger_type:
- log.info(f"Setting up triggers of type {trigger_type.value}")
+ log.debug(f"Setting up triggers of type {trigger_type.value}")
  else:
- log.info("Setting up all triggers")
+ log.debug("Setting up all triggers")

  # -------------------------------------------------------------
  # Retrieve existing Deployments and Automation from the server
@@ -112,4 +112,15 @@ async def setup_triggers(
  await client.delete_automation(automation_id=existing_automation.id)
  log.info(f"{item_to_delete} Deleted")

+ if trigger_type:
+ log.info(
+ f"Processed triggers of type {trigger_type.value}: "
+ f"{len(report.created)} created, {len(report.updated)} updated, {len(report.unchanged)} unchanged, {len(report.deleted)} deleted"
+ )
+ else:
+ log.info(
+ f"Processed all triggers: "
+ f"{len(report.created)} created, {len(report.updated)} updated, {len(report.unchanged)} unchanged, {len(report.deleted)} deleted"
+ )
+
  return report
infrahub/types.py CHANGED
@@ -366,7 +366,7 @@ ATTRIBUTE_PYTHON_TYPES: dict[str, type] = {
  ATTRIBUTE_KIND_LABELS = list(ATTRIBUTE_TYPES.keys())

  # Data types supporting large values, which can therefore not be indexed in neo4j.
- LARGE_ATTRIBUTE_TYPES = [TextArea, JSON]
+ LARGE_ATTRIBUTE_TYPES = [TextArea, JSON, List]


  def get_attribute_type(kind: str = "Default") -> type[InfrahubDataType]:
infrahub/webhook/models.py CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
  import base64
  import hashlib
  import hmac
+ import json
  from typing import TYPE_CHECKING, Any
  from uuid import UUID, uuid4

@@ -170,7 +171,7 @@ class StandardWebhook(Webhook):
  def _assign_headers(self, uuid: UUID | None = None, at: Timestamp | None = None) -> None:
  message_id = f"msg_{uuid.hex}" if uuid else f"msg_{uuid4().hex}"
  timestamp = str(at.to_timestamp()) if at else str(Timestamp().to_timestamp())
- payload = self._payload or {}
+ payload = json.dumps(self._payload or {})
  unsigned_data = f"{message_id}.{timestamp}.{payload}".encode()
  signature = self._sign(data=unsigned_data)
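The change above serializes the webhook payload with `json.dumps()` before signing, so the signed bytes are stable JSON rather than a Python `dict` repr. A small illustrative sketch of the effect using only the standard library; the helper name and the hex digest encoding are assumptions, not the package's `_sign()` implementation:

```python
import hashlib
import hmac
import json


def sign_payload(secret: bytes, message_id: str, timestamp: str, payload: dict) -> str:
    # Mirrors the pattern shown above: sign "<message_id>.<timestamp>.<payload>".
    # json.dumps() yields receiver-parsable JSON, whereas f-string interpolation
    # of a dict would embed Python's repr (single quotes, True/False, ...).
    body = json.dumps(payload)
    unsigned_data = f"{message_id}.{timestamp}.{body}".encode()
    return hmac.new(secret, unsigned_data, hashlib.sha256).hexdigest()
```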
 
infrahub/workflows/catalogue.py CHANGED
@@ -251,6 +251,14 @@ COMPUTED_ATTRIBUTE_PROCESS_JINJA2 = WorkflowDefinition(
  tags=[WorkflowTag.DATABASE_CHANGE],
  )

+ COMPUTED_ATTRIBUTE_JINJA2_UPDATE_VALUE = WorkflowDefinition(
+ name="computed-attribute-jinja2-update-value",
+ type=WorkflowType.CORE,
+ module="infrahub.computed_attribute.tasks",
+ function="computed_attribute_jinja2_update_value",
+ tags=[WorkflowTag.DATABASE_CHANGE],
+ )
+

  TRIGGER_UPDATE_JINJA_COMPUTED_ATTRIBUTES = WorkflowDefinition(
  name="trigger_update_jinja2_computed_attributes",
@@ -443,6 +451,7 @@ workflows = [
  BRANCH_MERGE_POST_PROCESS,
  BRANCH_REBASE,
  BRANCH_VALIDATE,
+ COMPUTED_ATTRIBUTE_JINJA2_UPDATE_VALUE,
  COMPUTED_ATTRIBUTE_PROCESS_JINJA2,
  COMPUTED_ATTRIBUTE_PROCESS_TRANSFORM,
  COMPUTED_ATTRIBUTE_SETUP_JINJA2,
infrahub_sdk/timestamp.py CHANGED
@@ -153,7 +153,7 @@ class Timestamp:
  nanoseconds: int = 0,
  disambiguate: Literal["compatible"] = "compatible",
  ) -> Timestamp:
- return Timestamp(
+ return self.__class__(
  self._obj.add(
  years=years,
  months=months,
@@ -183,7 +183,7 @@ class Timestamp:
  nanoseconds: int = 0,
  disambiguate: Literal["compatible"] = "compatible",
  ) -> Timestamp:
- return Timestamp(
+ return self.__class__(
  self._obj.subtract(
  years=years,
  months=months,
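Returning `self.__class__(...)` instead of the hard-coded `Timestamp(...)` means the enclosing methods (presumably `add()` and `subtract()`, inferred from the `self._obj.add()` / `self._obj.subtract()` calls shown) now preserve the caller's type. A hedged illustration with a hypothetical subclass:

```python
from infrahub_sdk.timestamp import Timestamp


class AuditTimestamp(Timestamp):
    # Hypothetical subclass used only for illustration.
    pass


later = AuditTimestamp().add(years=1)
# Before this change the result was a plain Timestamp; with self.__class__(...)
# it is an AuditTimestamp, so subclass behaviour survives date arithmetic.
assert isinstance(later, AuditTimestamp)
```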
{infrahub_server-1.2.9.dist-info → infrahub_server-1.2.10.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: infrahub-server
- Version: 1.2.9
+ Version: 1.2.10
  Summary: Infrahub is taking a new approach to Infrastructure Management by providing a new generation of datastore to organize and control all the data that defines how an infrastructure should run.
  Home-page: https://opsmill.com
  License: AGPL-3.0-only
@@ -77,7 +77,7 @@ Description-Content-Type: text/markdown

  Infrahub from [OpsMill](https://opsmill.com) is taking a new approach to Infrastructure Management by providing a new generation of datastore to organize and control all the data that defines how an infrastructure should run. Infrahub offers a central hub to manage the data, templates and playbooks that powers your infrastructure by combining the version control and branch management capabilities similar to Git with the flexible data model and UI of a graph database.

- If you just want to try Infrahub out, you can use our [Always-On Sandbox](https://demo.infrahub.app/) to get started.
+ If you just want to try Infrahub out, you can use our [Infrahub Sandbox](https://sandbox.infrahub.app/) to get started.

  ![infrahub screenshot](docs/docs/media/infrahub-readme.gif)

@@ -103,7 +103,7 @@ If you just want to try Infrahub out, you can use our [Always-On Sandbox](https:

  ## Quick Start

- [Always-On Sandbox](https://demo.infrahub.app/) - Instantly login to the UI of a demo environment of Infrahub with sample data pre-loaded.
+ [Infrahub Sandbox](https://sandbox.infrahub.app/) - Instantly login to the UI of a demo environment of Infrahub with sample data pre-loaded.

  [Getting Started Environment & Tutorial](https://opsmill.instruqt.com/pages/labs) - It spins up an instance of Infrahub on our cloud, provides a browser, terminal, code editor and walks you through the basic concepts: