infrahub-server 1.5.0b2__py3-none-any.whl → 1.5.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. infrahub/api/dependencies.py +4 -13
  2. infrahub/api/transformation.py +22 -20
  3. infrahub/cli/db.py +87 -65
  4. infrahub/cli/upgrade.py +27 -7
  5. infrahub/core/diff/calculator.py +2 -2
  6. infrahub/core/diff/query/delete_query.py +9 -5
  7. infrahub/core/diff/query/merge.py +39 -23
  8. infrahub/core/graph/__init__.py +1 -1
  9. infrahub/core/migrations/graph/__init__.py +5 -3
  10. infrahub/core/migrations/graph/m037_index_attr_vals.py +11 -30
  11. infrahub/core/migrations/graph/m039_ipam_reconcile.py +9 -7
  12. infrahub/core/migrations/graph/m041_deleted_dup_edges.py +149 -0
  13. infrahub/core/migrations/graph/{m041_profile_attrs_in_db.py → m042_profile_attrs_in_db.py} +10 -8
  14. infrahub/core/migrations/graph/{m042_create_hfid_display_label_in_db.py → m043_create_hfid_display_label_in_db.py} +6 -6
  15. infrahub/core/migrations/graph/{m043_backfill_hfid_display_label_in_db.py → m044_backfill_hfid_display_label_in_db.py} +9 -11
  16. infrahub/core/migrations/shared.py +14 -0
  17. infrahub/core/models.py +2 -2
  18. infrahub/core/node/__init__.py +26 -1
  19. infrahub/core/query/diff.py +61 -16
  20. infrahub/core/query/ipam.py +15 -4
  21. infrahub/core/query/node.py +42 -40
  22. infrahub/core/relationship/model.py +10 -5
  23. infrahub/core/schema/definitions/core/check.py +1 -1
  24. infrahub/core/schema/definitions/core/transform.py +1 -1
  25. infrahub/core/schema/schema_branch_display.py +12 -0
  26. infrahub/core/schema/schema_branch_hfid.py +6 -0
  27. infrahub/core/validators/uniqueness/checker.py +2 -1
  28. infrahub/database/__init__.py +0 -13
  29. infrahub/graphql/analyzer.py +9 -0
  30. infrahub/graphql/mutations/branch.py +5 -0
  31. infrahub/graphql/mutations/proposed_change.py +6 -0
  32. infrahub/message_bus/types.py +1 -0
  33. infrahub/profiles/queries/get_profile_data.py +4 -5
  34. infrahub/proposed_change/tasks.py +43 -9
  35. infrahub_sdk/analyzer.py +1 -1
  36. infrahub_sdk/batch.py +2 -2
  37. infrahub_sdk/branch.py +14 -2
  38. infrahub_sdk/checks.py +1 -1
  39. infrahub_sdk/client.py +2 -4
  40. infrahub_sdk/ctl/branch.py +3 -0
  41. infrahub_sdk/ctl/cli_commands.py +2 -0
  42. infrahub_sdk/ctl/exceptions.py +1 -1
  43. infrahub_sdk/ctl/task.py +110 -0
  44. infrahub_sdk/exceptions.py +18 -18
  45. infrahub_sdk/graphql/query.py +2 -2
  46. infrahub_sdk/node/attribute.py +1 -1
  47. infrahub_sdk/node/property.py +1 -1
  48. infrahub_sdk/node/related_node.py +3 -3
  49. infrahub_sdk/node/relationship.py +4 -6
  50. infrahub_sdk/object_store.py +2 -2
  51. infrahub_sdk/operation.py +1 -1
  52. infrahub_sdk/protocols_generator/generator.py +1 -1
  53. infrahub_sdk/pytest_plugin/exceptions.py +9 -9
  54. infrahub_sdk/pytest_plugin/items/base.py +1 -1
  55. infrahub_sdk/pytest_plugin/items/check.py +1 -1
  56. infrahub_sdk/pytest_plugin/items/python_transform.py +1 -1
  57. infrahub_sdk/repository.py +1 -1
  58. infrahub_sdk/schema/__init__.py +1 -1
  59. infrahub_sdk/spec/object.py +7 -3
  60. infrahub_sdk/task/exceptions.py +4 -4
  61. infrahub_sdk/task/manager.py +2 -2
  62. infrahub_sdk/task/models.py +6 -4
  63. infrahub_sdk/timestamp.py +1 -1
  64. infrahub_sdk/transfer/exporter/json.py +1 -1
  65. infrahub_sdk/transfer/importer/json.py +1 -1
  66. infrahub_sdk/transforms.py +1 -1
  67. {infrahub_server-1.5.0b2.dist-info → infrahub_server-1.5.1.dist-info}/METADATA +2 -2
  68. {infrahub_server-1.5.0b2.dist-info → infrahub_server-1.5.1.dist-info}/RECORD +75 -73
  69. infrahub_testcontainers/container.py +31 -5
  70. infrahub_testcontainers/helpers.py +19 -4
  71. infrahub_testcontainers/models.py +8 -6
  72. infrahub_testcontainers/performance_test.py +6 -4
  73. {infrahub_server-1.5.0b2.dist-info → infrahub_server-1.5.1.dist-info}/LICENSE.txt +0 -0
  74. {infrahub_server-1.5.0b2.dist-info → infrahub_server-1.5.1.dist-info}/WHEEL +0 -0
  75. {infrahub_server-1.5.0b2.dist-info → infrahub_server-1.5.1.dist-info}/entry_points.txt +0 -0
infrahub/core/migrations/shared.py CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
  from typing import TYPE_CHECKING, Any, Sequence, TypeAlias

  from pydantic import BaseModel, ConfigDict, Field
+ from rich.console import Console
  from typing_extensions import Self

  from infrahub.core import registry
@@ -13,6 +14,19 @@ from infrahub.core.timestamp import Timestamp

  from .query import MigrationBaseQuery  # noqa: TC001

+ MIGRATION_LOG_TIME_FORMAT = "[%Y-%m-%d %H:%M:%S]"
+ _migration_console: Console | None = None
+
+
+ def get_migration_console() -> Console:
+     global _migration_console
+
+     if _migration_console is None:
+         _migration_console = Console(log_time_format=MIGRATION_LOG_TIME_FORMAT)
+
+     return _migration_console
+
+
  if TYPE_CHECKING:
      from infrahub.core.branch import Branch
      from infrahub.core.schema.schema_branch import SchemaBranch
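Note: get_migration_console() lazily builds a single shared rich Console so every migration logs with the same "[%Y-%m-%d %H:%M:%S]" timestamp prefix. A minimal, self-contained sketch of the same pattern (the log message below is hypothetical, not from the package):

    from rich.console import Console

    _console: Console | None = None

    def get_console() -> Console:
        # create the console once and reuse it for every subsequent call
        global _console
        if _console is None:
            _console = Console(log_time_format="[%Y-%m-%d %H:%M:%S]")
        return _console

    get_console().log("migration step completed")  # hypothetical log line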
infrahub/core/models.py CHANGED
@@ -404,8 +404,8 @@ class HashableModelDiff(BaseModel):
  class HashableModel(BaseModel):
      model_config = ConfigDict(extra="forbid")

-     id: str | None = None
-     state: HashableModelState = HashableModelState.PRESENT
+     id: str | None = Field(default=None)
+     state: HashableModelState = Field(default=HashableModelState.PRESENT)

      _exclude_from_hash: list[str] = []
      _sort_by: list[str] = []
infrahub/core/node/__init__.py CHANGED
@@ -752,9 +752,34 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
          return self

      async def resolve_relationships(self, db: InfrahubDatabase) -> None:
+         extra_filters: dict[str, set[str]] = {}
+
+         if not self._existing:
+             # If we are creating a new node, we need to resolve extra filters from HFID and Display Labels,
+             # if we don't do this the fields might be blank
+             schema_branch = db.schema.get_schema_branch(name=self.get_branch_based_on_support_type().name)
+             try:
+                 hfid_identifier = schema_branch.hfids.get_node_definition(kind=self._schema.kind)
+                 for rel_name, attrs in hfid_identifier.relationship_fields.items():
+                     extra_filters.setdefault(rel_name, set()).update(attrs)
+             except KeyError:
+                 # No HFID defined for this kind
+                 ...
+             try:
+                 display_label_identifier = schema_branch.display_labels.get_template_node(kind=self._schema.kind)
+                 for rel_name, attrs in display_label_identifier.relationship_fields.items():
+                     extra_filters.setdefault(rel_name, set()).update(attrs)
+             except KeyError:
+                 # No Display Label defined for this kind
+                 ...
+
          for name in self._relationships:
              relm: RelationshipManager = getattr(self, name)
-             await relm.resolve(db=db)
+             query_filter = []
+             if name in extra_filters:
+                 query_filter.extend(list(extra_filters[name]))
+
+             await relm.resolve(db=db, fields=query_filter)

      async def load(
          self,
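Note: the new resolve_relationships() logic merges, per relationship, the attribute names required by the node's HFID and display-label definitions into a single filter map before resolving peers. An illustrative sketch with hypothetical schema data:

    # Hypothetical inputs standing in for hfid_identifier.relationship_fields
    # and display_label_identifier.relationship_fields.
    hfid_fields = {"device": {"name"}}
    display_label_fields = {"device": {"hostname"}, "site": {"name"}}

    extra_filters: dict[str, set[str]] = {}
    for source in (hfid_fields, display_label_fields):
        for rel_name, attrs in source.items():
            extra_filters.setdefault(rel_name, set()).update(attrs)

    assert extra_filters == {"device": {"name", "hostname"}, "site": {"name"}}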
infrahub/core/query/diff.py CHANGED
@@ -301,13 +301,15 @@ WITH p, q, diff_rel, CASE
      ELSE $from_time
  END AS row_from_time
  ORDER BY %(id_func)s(p) DESC
- SKIP $offset
- LIMIT $limit
+ SKIP toInteger($offset)
+ LIMIT toInteger($limit)
  // -------------------------------------
  // Add flag to indicate if there is more data after this
  // -------------------------------------
  WITH collect([p, q, diff_rel, row_from_time]) AS limited_results
- WITH limited_results, size(limited_results) = $limit AS has_more_data
+ // extra NULL row ensures that has_more_data is always returned, even if all results are filtered out below
+ WITH limited_results + [[NULL, NULL, NULL, NULL]] AS limited_results
+ WITH limited_results, size(limited_results) = ($limit + 1) AS has_more_data
  UNWIND limited_results AS one_result
  WITH one_result[0] AS p, one_result[1] AS q, one_result[2] AS diff_rel, one_result[3] AS row_from_time, has_more_data
  // -------------------------------------
@@ -470,14 +472,16 @@ AND (
  // Limit the number of paths
  // -------------------------------------
  WITH root, r_root, p, diff_rel, q
- ORDER BY r_root.from, p.uuid, q.uuid, diff_rel.branch, diff_rel.from
- SKIP $offset
- LIMIT $limit
+ ORDER BY r_root.from, p.uuid, q.uuid, q.name, diff_rel.branch, diff_rel.from
+ SKIP toInteger($offset)
+ LIMIT toInteger($limit)
  // -------------------------------------
  // Add flag to indicate if there is more data after this
  // -------------------------------------
  WITH collect([root, r_root, p, diff_rel, q]) AS limited_results
- WITH limited_results, size(limited_results) = $limit AS has_more_data
+ // extra NULL row ensures that has_more_data is always returned, even if all results are filtered out below
+ WITH limited_results + [[NULL, NULL, NULL, NULL, NULL]] AS limited_results
+ WITH limited_results, size(limited_results) = ($limit + 1) AS has_more_data
  UNWIND limited_results AS one_result
  WITH one_result[0] AS root, one_result[1] AS r_root, one_result[2] AS p, one_result[3] AS diff_rel, one_result[4] AS q, has_more_data
  // -------------------------------------
@@ -641,8 +645,28 @@ AND (
  )
  // skip paths where nodes/attrs/rels are updated after $from_time, those are handled in other queries
  AND (
-     r_root.from <= $from_time AND (r_root.to IS NULL OR r_root.branch <> diff_rel.branch OR r_root.to <= $from_time)
-     AND r_node.from <= $from_time AND (r_node.to IS NULL OR r_node.branch <> diff_rel.branch OR r_node.to <= $from_time)
+     (
+         r_root.branch = diff_rel.branch
+         AND r_root.from <= $from_time
+         AND (r_root.to IS NULL OR r_root.to >= $to_time)
+     )
+     OR (
+         r_root.branch <> diff_rel.branch
+         AND r_root.from <= $from_time
+         AND (r_root.to IS NULL OR r_root.to >= $branch_from_time)
+     )
+ )
+ AND (
+     (
+         r_node.branch = diff_rel.branch
+         AND r_node.from <= $from_time
+         AND (r_node.to IS NULL OR r_node.to >= $to_time)
+     )
+     OR (
+         r_node.branch <> diff_rel.branch
+         AND r_node.from <= $from_time
+         AND (r_node.to IS NULL OR r_node.to >= $branch_from_time)
+     )
  )
  )
  // time-based filters for new nodes
@@ -658,8 +682,27 @@ AND (
  )
  // skip paths where nodes/attrs/rels are updated after $branch_from_time, those are handled in other queries
  AND (
-     r_root.from <= $branch_from_time AND (r_root.to IS NULL OR r_root.branch <> diff_rel.branch OR r_root.to <= $branch_from_time)
-     AND r_node.from <= $branch_from_time AND (r_node.to IS NULL OR r_node.branch <> diff_rel.branch OR r_node.to <= $branch_from_time)
+     (
+         r_root.branch = diff_rel.branch
+         AND (r_root.to IS NULL OR r_root.to >= $to_time)
+     )
+     OR (
+         r_root.branch <> diff_rel.branch
+         AND r_root.from <= $branch_from_time
+         AND (r_root.to IS NULL OR r_root.to >= $branch_from_time)
+     )
+ )
+ AND (
+     (
+         r_node.branch = diff_rel.branch
+         AND r_node.from <= $branch_from_time
+         AND (r_node.to IS NULL OR r_node.to >= $to_time)
+     )
+     OR (
+         r_node.branch <> diff_rel.branch
+         AND r_node.from <= $branch_from_time
+         AND (r_node.to IS NULL OR r_node.to >= $branch_from_time)
+     )
  )
  )
  )
@@ -701,13 +744,15 @@ AND [%(id_func)s(n), type(r_node)] <> [%(id_func)s(q), type(diff_rel)]
  // -------------------------------------
  WITH diff_rel_path, r_root, n, r_node, p, diff_rel
  ORDER BY r_root.from, n.uuid, p.uuid, type(diff_rel), diff_rel.branch, diff_rel.from
- SKIP $offset
- LIMIT $limit
+ SKIP toInteger($offset)
+ LIMIT toInteger($limit)
  // -------------------------------------
  // Add flag to indicate if there is more data after this
  // -------------------------------------
  WITH collect([diff_rel_path, r_root, n, r_node, p, diff_rel]) AS limited_results
- WITH limited_results, size(limited_results) = $limit AS has_more_data
+ // extra NULL row ensures that has_more_data is always returned, even if all results are filtered out below
+ WITH limited_results + [[NULL, NULL, NULL, NULL, NULL, NULL]] AS limited_results
+ WITH limited_results, size(limited_results) = ($limit + 1) AS has_more_data
  UNWIND limited_results AS one_result
  WITH one_result[0] AS diff_rel_path, one_result[1] AS r_root, one_result[2] AS n,
      one_result[3] AS r_node, one_result[4] AS p, one_result[5] AS diff_rel, has_more_data
@@ -803,8 +848,8 @@ WHERE num_nodes_with_uuid > 1
  // -------------------------------------
  WITH node_uuid
  ORDER BY node_uuid
- SKIP $offset
- LIMIT $limit
+ SKIP toInteger($offset)
+ LIMIT toInteger($limit)
  WITH collect(node_uuid) AS node_uuids
  WITH node_uuids, size(node_uuids) = $limit AS has_more_data
  MATCH (:Root)<-[diff_rel:IS_PART_OF {branch: $branch_name}]-(n:Node)
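Note: across these hunks the pagination parameters are now cast with toInteger(), and the page queries append one sentinel NULL row after collect() so that has_more_data is still emitted even when later filtering removes every real row. A Python analogy of the same bookkeeping (not the Cypher itself):

    def paginate(rows: list[dict], offset: int, limit: int) -> tuple[list[dict], bool]:
        page = rows[offset : offset + limit]
        page = page + [None]                    # sentinel row, mirrors the extra [NULL, ...] entry
        has_more_data = len(page) == limit + 1  # mirrors size(limited_results) = ($limit + 1)
        return [row for row in page if row is not None], has_more_data

    rows = [{"id": i} for i in range(25)]
    print(paginate(rows, offset=0, limit=10))   # 10 rows, has_more_data=True
    print(paginate(rows, offset=20, limit=10))  # 5 rows, has_more_data=False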
infrahub/core/query/ipam.py CHANGED
@@ -450,12 +450,23 @@ class IPPrefixReconcileQuery(Query):
  // ------------------
  CALL (ip_node) {
      OPTIONAL MATCH parent_prefix_path = (ip_node)-[r1:IS_RELATED]->(:Relationship {name: "parent__child"})-[r2:IS_RELATED]->(current_parent:%(ip_prefix_kind)s)
-     WHERE all(r IN relationships(parent_prefix_path) WHERE (%(branch_filter)s))
+     WHERE $is_prefix = TRUE
+     AND all(r IN relationships(parent_prefix_path) WHERE (%(branch_filter)s))
      RETURN current_parent, (r1.status = "active" AND r2.status = "active") AS parent_is_active
      ORDER BY r1.branch_level DESC, r1.from DESC, r1.status ASC, r2.branch_level DESC, r2.from DESC, r2.status ASC
      LIMIT 1
  }
- WITH ip_namespace, ip_node, CASE WHEN parent_is_active THEN current_parent ELSE NULL END as current_parent
+ WITH ip_namespace, ip_node, CASE WHEN parent_is_active THEN current_parent ELSE NULL END as prefix_parent
+ CALL (ip_node) {
+     OPTIONAL MATCH parent_prefix_path = (ip_node)-[r1:IS_RELATED]->(:Relationship {name: "ip_prefix__ip_address"})<-[r2:IS_RELATED]-(current_parent:%(ip_prefix_kind)s)
+     WHERE $is_prefix = FALSE
+     AND all(r IN relationships(parent_prefix_path) WHERE (%(branch_filter)s))
+     RETURN current_parent, (r1.status = "active" AND r2.status = "active") AS parent_is_active
+     ORDER BY r1.branch_level DESC, r1.from DESC, r1.status ASC, r2.branch_level DESC, r2.from DESC, r2.status ASC
+     LIMIT 1
+ }
+ WITH ip_namespace, ip_node, prefix_parent, CASE WHEN parent_is_active THEN current_parent ELSE NULL END as address_parent
+ WITH ip_namespace, ip_node, COALESCE(prefix_parent, address_parent) AS current_parent
  """ % {
      "branch_filter": branch_filter,
      "ip_prefix_kind": InfrahubKind.IPPREFIX,
@@ -467,7 +478,7 @@ class IPPrefixReconcileQuery(Query):
  // Get prefix node's current prefix children, if any exist
  // ------------------
  CALL (ip_node) {
-     OPTIONAL MATCH child_prefix_path = (ip_node)<-[r1:IS_RELATED]-(:Relationship {name: "parent__child"})<-[r2:IS_RELATED]-(current_prefix_child:%(ip_prefix_kind)s)
+     OPTIONAL MATCH child_prefix_path = (ip_node:%(ip_prefix_kind)s)<-[r1:IS_RELATED]-(:Relationship {name: "parent__child"})<-[r2:IS_RELATED]-(current_prefix_child:%(ip_prefix_kind)s)
      WHERE all(r IN relationships(child_prefix_path) WHERE (%(branch_filter)s))
      WITH current_prefix_child, (r1.status = "active" AND r2.status = "active") AS is_active
      ORDER BY current_prefix_child.uuid, r1.branch_level DESC, r1.from DESC, r2.branch_level DESC, r2.from DESC
@@ -479,7 +490,7 @@ class IPPrefixReconcileQuery(Query):
  // Get prefix node's current address children, if any exist
  // ------------------
  CALL (ip_node) {
-     OPTIONAL MATCH child_address_path = (ip_node)-[r1:IS_RELATED]-(:Relationship {name: "ip_prefix__ip_address"})-[r2:IS_RELATED]-(current_address_child:%(ip_address_kind)s)
+     OPTIONAL MATCH child_address_path = (ip_node:%(ip_prefix_kind)s)-[r1:IS_RELATED]->(:Relationship {name: "ip_prefix__ip_address"})<-[r2:IS_RELATED]-(current_address_child:%(ip_address_kind)s)
      WHERE all(r IN relationships(child_address_path) WHERE (%(branch_filter)s))
      WITH current_address_child, (r1.status = "active" AND r2.status = "active") AS is_active
      ORDER BY current_address_child.uuid, r1.branch_level DESC, r1.from DESC, r2.branch_level DESC, r2.from DESC
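Note: the reconcile query now computes two parent candidates, one through the prefix path (gated by $is_prefix = TRUE) and one through the address path (gated by $is_prefix = FALSE), and keeps whichever is non-NULL via COALESCE. The equivalent selection in Python, with hypothetical values:

    def coalesce(*values):
        # return the first non-None value, mirroring Cypher's COALESCE()
        return next((value for value in values if value is not None), None)

    prefix_parent = None              # hypothetical: the node is an address, so the prefix path found nothing
    address_parent = "10.0.0.0/24"    # hypothetical parent found through ip_prefix__ip_address
    current_parent = coalesce(prefix_parent, address_parent)
    print(current_parent)  # 10.0.0.0/24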
infrahub/core/query/node.py CHANGED
@@ -642,53 +642,55 @@ class NodeListGetAttributeQuery(Query):
          self.add_to_query(query)

          query = """
-         CALL (n, a) {
-             MATCH (n)-[r:HAS_ATTRIBUTE]-(a:Attribute)
-             WHERE %(branch_filter)s
-             RETURN r AS r1
-             ORDER BY r.branch_level DESC, r.from DESC
-             LIMIT 1
-         }
-         WITH n, r1, a, might_use_profile
-         WHERE r1.status = "active"
-         WITH n, r1, a, might_use_profile
-         MATCH (a)-[r:HAS_VALUE]-(av:AttributeValue)
-         WHERE %(branch_filter)s
-         CALL (a, might_use_profile) {
-             OPTIONAL MATCH (a)-[r:HAS_SOURCE]->(:CoreProfile)
-             WHERE might_use_profile = TRUE AND %(branch_filter)s
-             RETURN r.status = "active" AS has_active_profile
-             ORDER BY r.branch_level DESC, r.from DESC, r.status ASC
-             LIMIT 1
-         }
-         WITH *, has_active_profile = TRUE AS is_from_profile
-         CALL (a, av) {
-             MATCH (a)-[r:HAS_VALUE]-(av:AttributeValue)
-             WHERE %(branch_filter)s
-             RETURN a as a1, r as r2, av as av1
-             ORDER BY r.branch_level DESC, r.from DESC
-             LIMIT 1
-         }
-         WITH n, r1, a1 as a, r2, av1 as av, is_from_profile
-         WHERE r2.status = "active"
-         WITH n, a, av, r1, r2, is_from_profile
+         CALL (n, a) {
+             MATCH (n)-[r:HAS_ATTRIBUTE]-(a:Attribute)
+             WHERE %(branch_filter)s
+             RETURN r AS r1
+             ORDER BY r.branch_level DESC, r.from DESC
+             LIMIT 1
+         }
+         WITH n, r1, a, might_use_profile
+         WHERE r1.status = "active"
+         WITH n, r1, a, might_use_profile
+         CALL (a, might_use_profile) {
+             OPTIONAL MATCH (a)-[r:HAS_SOURCE]->(:CoreProfile)
+             WHERE might_use_profile = TRUE AND %(branch_filter)s
+             RETURN r.status = "active" AS has_active_profile
+             ORDER BY r.branch_level DESC, r.from DESC, r.status ASC
+             LIMIT 1
+         }
+         WITH *, has_active_profile = TRUE AS is_from_profile
+         CALL (a) {
+             MATCH (a)-[r:HAS_VALUE]-(av:AttributeValue)
+             WHERE %(branch_filter)s
+             RETURN r as r2, av
+             ORDER BY r.branch_level DESC, r.from DESC
+             LIMIT 1
+         }
+         WITH n, r1, a, r2, av, is_from_profile
+         WHERE r2.status = "active"
          """ % {"branch_filter": branch_filter}
          self.add_to_query(query)

          self.return_labels = ["n", "a", "av", "r1", "r2", "is_from_profile"]

          # Add Is_Protected and Is_visible
-         rel_isv_branch_filter, _ = self.branch.get_query_filter_path(
-             at=self.at, branch_agnostic=self.branch_agnostic, variable_name="rel_isv"
-         )
-         rel_isp_branch_filter, _ = self.branch.get_query_filter_path(
-             at=self.at, branch_agnostic=self.branch_agnostic, variable_name="rel_isp"
-         )
          query = """
-         MATCH (a)-[rel_isv:IS_VISIBLE]-(isv:Boolean)
-         MATCH (a)-[rel_isp:IS_PROTECTED]-(isp:Boolean)
-         WHERE (%(rel_isv_branch_filter)s) AND (%(rel_isp_branch_filter)s)
-         """ % {"rel_isv_branch_filter": rel_isv_branch_filter, "rel_isp_branch_filter": rel_isp_branch_filter}
+         CALL (a) {
+             MATCH (a)-[r:IS_VISIBLE]-(isv:Boolean)
+             WHERE (%(branch_filter)s)
+             RETURN r AS rel_isv, isv
+             ORDER BY rel_isv.branch_level DESC, rel_isv.from DESC, rel_isv.status ASC
+             LIMIT 1
+         }
+         CALL (a) {
+             MATCH (a)-[r:IS_PROTECTED]-(isp:Boolean)
+             WHERE (%(branch_filter)s)
+             RETURN r AS rel_isp, isp
+             ORDER BY rel_isp.branch_level DESC, rel_isp.from DESC, rel_isp.status ASC
+             LIMIT 1
+         }
+         """ % {"branch_filter": branch_filter}
          self.add_to_query(query)

          self.return_labels.extend(["isv", "isp", "rel_isv", "rel_isp"])
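Note: the rewritten attribute query now selects a single winning edge per attribute value and per IS_VISIBLE / IS_PROTECTED flag inside CALL subqueries ordered by branch_level and from (descending) with LIMIT 1, and only afterwards checks that the winner is active. A rough Python analogue over hypothetical edge records:

    from dataclasses import dataclass

    @dataclass
    class Edge:
        branch_level: int
        from_time: str
        status: str

    edges = [
        Edge(branch_level=1, from_time="2025-01-01T00:00:00Z", status="active"),
        Edge(branch_level=2, from_time="2025-03-01T00:00:00Z", status="deleted"),
    ]
    # highest branch_level wins, then the most recent from time
    winner = max(edges, key=lambda edge: (edge.branch_level, edge.from_time))
    print(winner.status == "active")  # False: the most relevant edge deleted the value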
infrahub/core/relationship/model.py CHANGED
@@ -445,15 +445,20 @@ class Relationship(FlagPropertyMixin, NodePropertyMixin):
          )
          await delete_query.execute(db=db)

-     async def resolve(self, db: InfrahubDatabase, at: Timestamp | None = None) -> None:
+     async def resolve(self, db: InfrahubDatabase, at: Timestamp | None = None, fields: list[str] | None = None) -> None:
          """Resolve the peer of the relationship."""

+         fields = fields or []
+         query_fields = dict.fromkeys(fields)
+         if "display_label" not in query_fields:
+             query_fields["display_label"] = None
+
          if self._peer is not None:
              return

          if self.peer_id and not is_valid_uuid(self.peer_id):
              peer = await registry.manager.get_one_by_default_filter(
-                 db=db, id=self.peer_id, branch=self.branch, kind=self.schema.peer, fields={"display_label": None}
+                 db=db, id=self.peer_id, branch=self.branch, kind=self.schema.peer, fields=query_fields
              )
              if peer:
                  self.set_peer(value=peer)
@@ -470,7 +475,7 @@ class Relationship(FlagPropertyMixin, NodePropertyMixin):
                  hfid=self.peer_hfid,
                  branch=self.branch,
                  kind=kind,
-                 fields={"display_label": None},
+                 fields=query_fields,
                  raise_on_error=True,
              )
              self.set_peer(value=peer)
@@ -1142,9 +1147,9 @@ class RelationshipManager:

          return True

-     async def resolve(self, db: InfrahubDatabase) -> None:
+     async def resolve(self, db: InfrahubDatabase, fields: list[str] | None = None) -> None:
          for rel in self._relationships:
-             await rel.resolve(db=db)
+             await rel.resolve(db=db, fields=fields)

      async def remove_locally(
          self,
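Note: Relationship.resolve() now accepts an optional fields list and turns it into the peer query's field selection, always keeping display_label so existing callers behave as before. A minimal sketch with hypothetical field names:

    fields = ["name", "address"]            # e.g. attributes required by an HFID or display label
    query_fields = dict.fromkeys(fields)    # {"name": None, "address": None}
    if "display_label" not in query_fields:
        query_fields["display_label"] = None
    print(query_fields)  # {'name': None, 'address': None, 'display_label': None}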
infrahub/core/schema/definitions/core/check.py CHANGED
@@ -29,7 +29,7 @@ core_check_definition = NodeSchema(
          Attr(name="description", kind="Text", optional=True),
          Attr(name="file_path", kind="Text"),
          Attr(name="class_name", kind="Text"),
-         Attr(name="timeout", kind="Number", default_value=10),
+         Attr(name="timeout", kind="Number", default_value=60),
          Attr(name="parameters", kind="JSON", optional=True),
      ],
      relationships=[
infrahub/core/schema/definitions/core/transform.py CHANGED
@@ -29,7 +29,7 @@ core_transform = GenericSchema(
          Attr(name="name", kind="Text", unique=True),
          Attr(name="label", kind="Text", optional=True),
          Attr(name="description", kind="Text", optional=True),
-         Attr(name="timeout", kind="Number", default_value=10),
+         Attr(name="timeout", kind="Number", default_value=60),
      ],
      relationships=[
          Rel(
infrahub/core/schema/schema_branch_display.py CHANGED
@@ -14,6 +14,7 @@ class TemplateLabel:
      template: str
      attributes: set[str] = field(default_factory=set)
      relationships: set[str] = field(default_factory=set)
+     relationship_fields: dict[str, set[str]] = field(default_factory=dict)
      filter_key: str = "ids"

      @property
@@ -76,6 +77,17 @@ class DisplayLabels:
              self._template_based_display_labels[kind].attributes.add(schema_path.active_attribute_schema.name)
          elif schema_path.is_type_relationship and schema_path.related_schema:
              self._template_based_display_labels[kind].relationships.add(schema_path.active_relationship_schema.name)
+             if (
+                 schema_path.active_relationship_schema.name
+                 not in self._template_based_display_labels[kind].relationship_fields
+             ):
+                 self._template_based_display_labels[kind].relationship_fields[
+                     schema_path.active_relationship_schema.name
+                 ] = set()
+             self._template_based_display_labels[kind].relationship_fields[
+                 schema_path.active_relationship_schema.name
+             ].add(schema_path.active_attribute_schema.name)
+
              if schema_path.related_schema.kind not in self._template_relationship_triggers:
                  self._template_relationship_triggers[schema_path.related_schema.kind] = RelationshipTriggers()
              if (
infrahub/core/schema/schema_branch_hfid.py CHANGED
@@ -14,6 +14,7 @@ class HFIDDefinition:
      hfid: list[str]
      attributes: set[str] = field(default_factory=set)
      relationships: set[str] = field(default_factory=set)
+     relationship_fields: dict[str, set[str]] = field(default_factory=dict)
      filter_key: str = "ids"

      @property
@@ -67,6 +68,11 @@ class HFIDs:
              self._node_level_hfids[kind].attributes.add(schema_path.active_attribute_schema.name)
          elif schema_path.is_type_relationship and schema_path.related_schema:
              self._node_level_hfids[kind].relationships.add(schema_path.active_relationship_schema.name)
+             if schema_path.active_relationship_schema.name not in self._node_level_hfids[kind].relationship_fields:
+                 self._node_level_hfids[kind].relationship_fields[schema_path.active_relationship_schema.name] = set()
+             self._node_level_hfids[kind].relationship_fields[schema_path.active_relationship_schema.name].add(
+                 schema_path.active_attribute_schema.name
+             )
              if schema_path.related_schema.kind not in self._relationship_triggers:
                  self._relationship_triggers[schema_path.related_schema.kind] = RelationshipTriggers()
              if (
infrahub/core/validators/uniqueness/checker.py CHANGED
@@ -122,7 +122,8 @@ class UniquenessChecker(ConstraintCheckerInterface):
              db=self.db, branch=await self.get_branch(), query_request=query_request
          )
          async with self.semaphore:
-             query_results = await query.execute(db=self.db.start_session(read_only=True))
+             async with self.db.start_session(read_only=True) as db:
+                 query_results = await query.execute(db=db)

          return await self._parse_results(schema=schema, query_results=query_results.results)

infrahub/database/__init__.py CHANGED
@@ -173,19 +173,6 @@ class InfrahubDatabase:
          else:
              self.db_type = config.SETTINGS.database.db_type

-     def __del__(self) -> None:
-         if not self._session or not self._is_session_local or self._session.closed():
-             return
-
-         try:
-             loop = asyncio.get_running_loop()
-         except RuntimeError:
-             loop = None
-         if loop and loop.is_running():
-             loop.create_task(self._session.close())
-         else:
-             asyncio.run(self._session.close())
-
      @property
      def is_session(self) -> bool:
          if self._mode == InfrahubDatabaseMode.SESSION:
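Note: dropping __del__ removes best-effort async cleanup from the destructor; closing a database session there is unreliable because the destructor may run without a running event loop, or after the loop has closed. An illustration of the explicit alternative (a stand-in session class, not Infrahub's API):

    import asyncio

    class FakeSession:
        async def close(self) -> None:
            print("session closed")

    async def main() -> None:
        session = FakeSession()
        try:
            pass  # ... use the session ...
        finally:
            await session.close()  # explicit, awaited cleanup instead of best-effort __del__

    asyncio.run(main())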
infrahub/graphql/analyzer.py CHANGED
@@ -312,6 +312,13 @@ class GraphQLQueryReport:
          return []

      def required_argument(self, argument: GraphQLArgument) -> bool:
+         if argument.name == "ids" and argument.kind == "list_value":
+             for variable in self.variables:
+                 if f"['${variable.name}']" == argument.as_variable_name and variable.required:
+                     return True
+
+             return False
+
          if not argument.is_variable:
              # If the argument isn't a variable it would have been
              # statically defined in the input and as such required
@@ -364,6 +371,8 @@ class GraphQLQueryReport:
              if [[argument.name]] == query.infrahub_model.uniqueness_constraints:
                  if self.required_argument(argument=argument):
                      targets_single_query = True
+             elif argument.name == "ids" and self.required_argument(argument=argument):
+                 targets_single_query = True

              if not targets_single_query:
                  return False
infrahub/graphql/mutations/branch.py CHANGED
@@ -9,6 +9,7 @@ from typing_extensions import Self
  from infrahub.branch.merge_mutation_checker import verify_branch_merge_mutation_allowed
  from infrahub.core import registry
  from infrahub.core.branch import Branch
+ from infrahub.core.branch.enums import BranchStatus
  from infrahub.database import retry_db_transaction
  from infrahub.exceptions import BranchNotFoundError, ValidationError
  from infrahub.graphql.context import apply_external_context
@@ -290,6 +291,10 @@ class BranchMerge(Mutation):
              db=graphql_context.db, account_session=graphql_context.active_account_session
          )

+         obj = await Branch.get_by_name(db=graphql_context.db, name=branch_name)
+         if obj.status == BranchStatus.NEED_UPGRADE_REBASE:
+             raise ValidationError(f"Cannot merge branch '{branch_name}' with status '{obj.status.name}'")
+
          if wait_until_completion:
              await graphql_context.active_service.workflow.execute_workflow(
                  workflow=BRANCH_MERGE_MUTATION,
infrahub/graphql/mutations/proposed_change.py CHANGED
@@ -8,6 +8,7 @@ from graphql import GraphQLResolveInfo
  from infrahub import lock
  from infrahub.core.account import GlobalPermission
  from infrahub.core.branch import Branch
+ from infrahub.core.branch.enums import BranchStatus
  from infrahub.core.constants import (
      CheckType,
      GlobalPermissions,
@@ -156,6 +157,11 @@ class InfrahubProposedChangeMutation(InfrahubMutationMixin, Mutation):
          if updated_state == ProposedChangeState.MERGED:
              if will_be_draft:
                  raise ValidationError("A draft proposed change is not allowed to be merged")
+
+             source_branch = await Branch.get_by_name(db=graphql_context.db, name=obj.source_branch.value)
+             if source_branch.status == BranchStatus.NEED_UPGRADE_REBASE:
+                 raise ValidationError("The branch must be upgraded and rebased prior to merging the proposed change")
+
              data["state"]["value"] = ProposedChangeState.MERGING.value

          proposed_change, result = await super().mutate_update(
infrahub/message_bus/types.py CHANGED
@@ -92,6 +92,7 @@ class ProposedChangeArtifactDefinition(BaseModel):
      query_name: str  # Deprecated
      query_id: str
      query_models: list[str]
+     query_payload: str = Field(..., description="GraphQL query")
      repository_id: str
      transform_kind: str
      template_path: str = Field(default="")
infrahub/profiles/queries/get_profile_data.py CHANGED
@@ -57,18 +57,17 @@ CALL (profile, attr) {
      ORDER BY r.branch_level DESC, r.from DESC, r.status ASC
      RETURN r.status = "active" AS is_active
  }
- WITH profile, attr, is_active
+ WITH profile, attr
  WHERE is_active = TRUE
  // --------------
  // get the attribute values
  // --------------
- MATCH (attr)-[:HAS_VALUE]->(av:AttributeValue)
- WITH DISTINCT profile, attr, av
- CALL (attr, av) {
+ CALL (attr) {
      MATCH (attr)-[r:HAS_VALUE]->(av)
      WHERE %(branch_filter)s
      ORDER BY r.branch_level DESC, r.from DESC, r.status ASC
-     RETURN r.status = "active" AS is_active
+     RETURN av, r.status = "active" AS is_active
+     LIMIT 1
  }
  WITH profile, attr, av
  WHERE is_active = TRUE