infrahub-server 1.5.0b1__py3-none-any.whl → 1.5.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (171)
  1. infrahub/api/dependencies.py +4 -13
  2. infrahub/api/internal.py +2 -0
  3. infrahub/api/oauth2.py +13 -19
  4. infrahub/api/oidc.py +15 -21
  5. infrahub/api/schema.py +24 -3
  6. infrahub/api/transformation.py +22 -20
  7. infrahub/artifacts/models.py +2 -1
  8. infrahub/auth.py +137 -3
  9. infrahub/cli/__init__.py +2 -0
  10. infrahub/cli/db.py +158 -155
  11. infrahub/cli/dev.py +118 -0
  12. infrahub/cli/tasks.py +46 -0
  13. infrahub/cli/upgrade.py +56 -9
  14. infrahub/computed_attribute/tasks.py +20 -8
  15. infrahub/core/attribute.py +10 -2
  16. infrahub/core/branch/enums.py +1 -1
  17. infrahub/core/branch/models.py +7 -3
  18. infrahub/core/branch/tasks.py +68 -7
  19. infrahub/core/constants/__init__.py +3 -0
  20. infrahub/core/diff/calculator.py +2 -2
  21. infrahub/core/diff/query/artifact.py +1 -0
  22. infrahub/core/diff/query/delete_query.py +9 -5
  23. infrahub/core/diff/query/field_summary.py +1 -0
  24. infrahub/core/diff/query/merge.py +39 -23
  25. infrahub/core/graph/__init__.py +1 -1
  26. infrahub/core/initialization.py +5 -2
  27. infrahub/core/migrations/__init__.py +3 -0
  28. infrahub/core/migrations/exceptions.py +4 -0
  29. infrahub/core/migrations/graph/__init__.py +12 -13
  30. infrahub/core/migrations/graph/load_schema_branch.py +21 -0
  31. infrahub/core/migrations/graph/m013_convert_git_password_credential.py +1 -1
  32. infrahub/core/migrations/graph/m037_index_attr_vals.py +11 -30
  33. infrahub/core/migrations/graph/m039_ipam_reconcile.py +9 -7
  34. infrahub/core/migrations/graph/m040_duplicated_attributes.py +81 -0
  35. infrahub/core/migrations/graph/m041_deleted_dup_edges.py +149 -0
  36. infrahub/core/migrations/graph/m042_profile_attrs_in_db.py +147 -0
  37. infrahub/core/migrations/graph/m043_create_hfid_display_label_in_db.py +164 -0
  38. infrahub/core/migrations/graph/m044_backfill_hfid_display_label_in_db.py +864 -0
  39. infrahub/core/migrations/query/__init__.py +7 -8
  40. infrahub/core/migrations/query/attribute_add.py +8 -6
  41. infrahub/core/migrations/query/attribute_remove.py +134 -0
  42. infrahub/core/migrations/runner.py +54 -0
  43. infrahub/core/migrations/schema/attribute_kind_update.py +9 -3
  44. infrahub/core/migrations/schema/attribute_supports_profile.py +90 -0
  45. infrahub/core/migrations/schema/node_attribute_add.py +30 -2
  46. infrahub/core/migrations/schema/node_attribute_remove.py +13 -109
  47. infrahub/core/migrations/schema/node_kind_update.py +2 -1
  48. infrahub/core/migrations/schema/node_remove.py +2 -1
  49. infrahub/core/migrations/schema/placeholder_dummy.py +3 -2
  50. infrahub/core/migrations/shared.py +62 -14
  51. infrahub/core/models.py +2 -2
  52. infrahub/core/node/__init__.py +42 -12
  53. infrahub/core/node/create.py +46 -63
  54. infrahub/core/node/lock_utils.py +70 -44
  55. infrahub/core/node/resource_manager/ip_address_pool.py +2 -1
  56. infrahub/core/node/resource_manager/ip_prefix_pool.py +2 -1
  57. infrahub/core/node/resource_manager/number_pool.py +2 -1
  58. infrahub/core/query/attribute.py +55 -0
  59. infrahub/core/query/diff.py +61 -16
  60. infrahub/core/query/ipam.py +16 -4
  61. infrahub/core/query/node.py +51 -43
  62. infrahub/core/query/relationship.py +1 -0
  63. infrahub/core/relationship/model.py +10 -5
  64. infrahub/core/schema/__init__.py +56 -0
  65. infrahub/core/schema/attribute_schema.py +4 -0
  66. infrahub/core/schema/definitions/core/check.py +1 -1
  67. infrahub/core/schema/definitions/core/transform.py +1 -1
  68. infrahub/core/schema/definitions/internal.py +2 -2
  69. infrahub/core/schema/generated/attribute_schema.py +2 -2
  70. infrahub/core/schema/manager.py +22 -1
  71. infrahub/core/schema/schema_branch.py +180 -22
  72. infrahub/core/schema/schema_branch_display.py +12 -0
  73. infrahub/core/schema/schema_branch_hfid.py +6 -0
  74. infrahub/core/validators/uniqueness/checker.py +2 -1
  75. infrahub/database/__init__.py +0 -13
  76. infrahub/database/graph.py +21 -0
  77. infrahub/display_labels/tasks.py +13 -7
  78. infrahub/events/branch_action.py +27 -1
  79. infrahub/generators/tasks.py +3 -7
  80. infrahub/git/base.py +4 -1
  81. infrahub/git/integrator.py +1 -1
  82. infrahub/git/models.py +2 -1
  83. infrahub/git/repository.py +22 -5
  84. infrahub/git/tasks.py +66 -10
  85. infrahub/git/utils.py +123 -1
  86. infrahub/graphql/analyzer.py +9 -0
  87. infrahub/graphql/api/endpoints.py +14 -4
  88. infrahub/graphql/manager.py +4 -9
  89. infrahub/graphql/mutations/branch.py +5 -0
  90. infrahub/graphql/mutations/convert_object_type.py +11 -1
  91. infrahub/graphql/mutations/display_label.py +17 -10
  92. infrahub/graphql/mutations/hfid.py +17 -10
  93. infrahub/graphql/mutations/ipam.py +54 -35
  94. infrahub/graphql/mutations/main.py +27 -28
  95. infrahub/graphql/mutations/proposed_change.py +6 -0
  96. infrahub/graphql/schema_sort.py +170 -0
  97. infrahub/graphql/types/branch.py +4 -1
  98. infrahub/graphql/types/enums.py +3 -0
  99. infrahub/hfid/tasks.py +13 -7
  100. infrahub/lock.py +52 -12
  101. infrahub/message_bus/types.py +3 -1
  102. infrahub/permissions/constants.py +2 -0
  103. infrahub/profiles/queries/get_profile_data.py +4 -5
  104. infrahub/proposed_change/tasks.py +66 -23
  105. infrahub/server.py +6 -2
  106. infrahub/services/__init__.py +2 -2
  107. infrahub/services/adapters/http/__init__.py +5 -0
  108. infrahub/services/adapters/workflow/worker.py +14 -3
  109. infrahub/task_manager/event.py +5 -0
  110. infrahub/task_manager/models.py +7 -0
  111. infrahub/task_manager/task.py +73 -0
  112. infrahub/trigger/setup.py +13 -4
  113. infrahub/trigger/tasks.py +3 -0
  114. infrahub/workers/dependencies.py +10 -1
  115. infrahub/workers/infrahub_async.py +10 -2
  116. infrahub/workflows/catalogue.py +8 -0
  117. infrahub/workflows/initialization.py +5 -0
  118. infrahub/workflows/utils.py +2 -1
  119. infrahub_sdk/analyzer.py +1 -1
  120. infrahub_sdk/batch.py +2 -2
  121. infrahub_sdk/branch.py +14 -2
  122. infrahub_sdk/checks.py +1 -1
  123. infrahub_sdk/client.py +15 -14
  124. infrahub_sdk/config.py +29 -2
  125. infrahub_sdk/ctl/branch.py +3 -0
  126. infrahub_sdk/ctl/cli_commands.py +2 -0
  127. infrahub_sdk/ctl/exceptions.py +1 -1
  128. infrahub_sdk/ctl/schema.py +22 -7
  129. infrahub_sdk/ctl/task.py +110 -0
  130. infrahub_sdk/exceptions.py +18 -18
  131. infrahub_sdk/graphql/query.py +2 -2
  132. infrahub_sdk/node/attribute.py +1 -1
  133. infrahub_sdk/node/property.py +1 -1
  134. infrahub_sdk/node/related_node.py +3 -3
  135. infrahub_sdk/node/relationship.py +4 -6
  136. infrahub_sdk/object_store.py +2 -2
  137. infrahub_sdk/operation.py +1 -1
  138. infrahub_sdk/protocols_generator/generator.py +1 -1
  139. infrahub_sdk/pytest_plugin/exceptions.py +9 -9
  140. infrahub_sdk/pytest_plugin/items/base.py +1 -1
  141. infrahub_sdk/pytest_plugin/items/check.py +1 -1
  142. infrahub_sdk/pytest_plugin/items/python_transform.py +1 -1
  143. infrahub_sdk/repository.py +1 -1
  144. infrahub_sdk/schema/__init__.py +33 -5
  145. infrahub_sdk/spec/models.py +7 -0
  146. infrahub_sdk/spec/object.py +41 -102
  147. infrahub_sdk/spec/processors/__init__.py +0 -0
  148. infrahub_sdk/spec/processors/data_processor.py +10 -0
  149. infrahub_sdk/spec/processors/factory.py +34 -0
  150. infrahub_sdk/spec/processors/range_expand_processor.py +56 -0
  151. infrahub_sdk/task/exceptions.py +4 -4
  152. infrahub_sdk/task/manager.py +2 -2
  153. infrahub_sdk/task/models.py +6 -4
  154. infrahub_sdk/timestamp.py +1 -1
  155. infrahub_sdk/transfer/exporter/json.py +1 -1
  156. infrahub_sdk/transfer/importer/json.py +1 -1
  157. infrahub_sdk/transforms.py +1 -1
  158. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/METADATA +4 -2
  159. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/RECORD +168 -152
  160. infrahub_testcontainers/container.py +144 -6
  161. infrahub_testcontainers/docker-compose-cluster.test.yml +5 -0
  162. infrahub_testcontainers/docker-compose.test.yml +5 -0
  163. infrahub_testcontainers/helpers.py +19 -4
  164. infrahub_testcontainers/models.py +8 -6
  165. infrahub_testcontainers/performance_test.py +6 -4
  166. infrahub/core/migrations/graph/m040_profile_attrs_in_db.py +0 -166
  167. infrahub/core/migrations/graph/m041_create_hfid_display_label_in_db.py +0 -97
  168. infrahub/core/migrations/graph/m042_backfill_hfid_display_label_in_db.py +0 -86
  169. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/LICENSE.txt +0 -0
  170. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/WHEEL +0 -0
  171. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/entry_points.txt +0 -0
@@ -301,13 +301,15 @@ WITH p, q, diff_rel, CASE
301
301
  ELSE $from_time
302
302
  END AS row_from_time
303
303
  ORDER BY %(id_func)s(p) DESC
304
- SKIP $offset
305
- LIMIT $limit
304
+ SKIP toInteger($offset)
305
+ LIMIT toInteger($limit)
306
306
  // -------------------------------------
307
307
  // Add flag to indicate if there is more data after this
308
308
  // -------------------------------------
309
309
  WITH collect([p, q, diff_rel, row_from_time]) AS limited_results
310
- WITH limited_results, size(limited_results) = $limit AS has_more_data
310
+ // extra NULL row ensures that has_more_data is always returned, even if all results are filtered out below
311
+ WITH limited_results + [[NULL, NULL, NULL, NULL]] AS limited_results
312
+ WITH limited_results, size(limited_results) = ($limit + 1) AS has_more_data
311
313
  UNWIND limited_results AS one_result
312
314
  WITH one_result[0] AS p, one_result[1] AS q, one_result[2] AS diff_rel, one_result[3] AS row_from_time, has_more_data
313
315
  // -------------------------------------
@@ -470,14 +472,16 @@ AND (
470
472
  // Limit the number of paths
471
473
  // -------------------------------------
472
474
  WITH root, r_root, p, diff_rel, q
473
- ORDER BY r_root.from, p.uuid, q.uuid, diff_rel.branch, diff_rel.from
474
- SKIP $offset
475
- LIMIT $limit
475
+ ORDER BY r_root.from, p.uuid, q.uuid, q.name, diff_rel.branch, diff_rel.from
476
+ SKIP toInteger($offset)
477
+ LIMIT toInteger($limit)
476
478
  // -------------------------------------
477
479
  // Add flag to indicate if there is more data after this
478
480
  // -------------------------------------
479
481
  WITH collect([root, r_root, p, diff_rel, q]) AS limited_results
480
- WITH limited_results, size(limited_results) = $limit AS has_more_data
482
+ // extra NULL row ensures that has_more_data is always returned, even if all results are filtered out below
483
+ WITH limited_results + [[NULL, NULL, NULL, NULL, NULL]] AS limited_results
484
+ WITH limited_results, size(limited_results) = ($limit + 1) AS has_more_data
481
485
  UNWIND limited_results AS one_result
482
486
  WITH one_result[0] AS root, one_result[1] AS r_root, one_result[2] AS p, one_result[3] AS diff_rel, one_result[4] AS q, has_more_data
483
487
  // -------------------------------------
@@ -641,8 +645,28 @@ AND (
641
645
  )
642
646
  // skip paths where nodes/attrs/rels are updated after $from_time, those are handled in other queries
643
647
  AND (
644
- r_root.from <= $from_time AND (r_root.to IS NULL OR r_root.branch <> diff_rel.branch OR r_root.to <= $from_time)
645
- AND r_node.from <= $from_time AND (r_node.to IS NULL OR r_node.branch <> diff_rel.branch OR r_node.to <= $from_time)
648
+ (
649
+ r_root.branch = diff_rel.branch
650
+ AND r_root.from <= $from_time
651
+ AND (r_root.to IS NULL OR r_root.to >= $to_time)
652
+ )
653
+ OR (
654
+ r_root.branch <> diff_rel.branch
655
+ AND r_root.from <= $from_time
656
+ AND (r_root.to IS NULL OR r_root.to >= $branch_from_time)
657
+ )
658
+ )
659
+ AND (
660
+ (
661
+ r_node.branch = diff_rel.branch
662
+ AND r_node.from <= $from_time
663
+ AND (r_node.to IS NULL OR r_node.to >= $to_time)
664
+ )
665
+ OR (
666
+ r_node.branch <> diff_rel.branch
667
+ AND r_node.from <= $from_time
668
+ AND (r_node.to IS NULL OR r_node.to >= $branch_from_time)
669
+ )
646
670
  )
647
671
  )
648
672
  // time-based filters for new nodes
@@ -658,8 +682,27 @@ AND (
658
682
  )
659
683
  // skip paths where nodes/attrs/rels are updated after $branch_from_time, those are handled in other queries
660
684
  AND (
661
- r_root.from <= $branch_from_time AND (r_root.to IS NULL OR r_root.branch <> diff_rel.branch OR r_root.to <= $branch_from_time)
662
- AND r_node.from <= $branch_from_time AND (r_node.to IS NULL OR r_node.branch <> diff_rel.branch OR r_node.to <= $branch_from_time)
685
+ (
686
+ r_root.branch = diff_rel.branch
687
+ AND (r_root.to IS NULL OR r_root.to >= $to_time)
688
+ )
689
+ OR (
690
+ r_root.branch <> diff_rel.branch
691
+ AND r_root.from <= $branch_from_time
692
+ AND (r_root.to IS NULL OR r_root.to >= $branch_from_time)
693
+ )
694
+ )
695
+ AND (
696
+ (
697
+ r_node.branch = diff_rel.branch
698
+ AND r_node.from <= $branch_from_time
699
+ AND (r_node.to IS NULL OR r_node.to >= $to_time)
700
+ )
701
+ OR (
702
+ r_node.branch <> diff_rel.branch
703
+ AND r_node.from <= $branch_from_time
704
+ AND (r_node.to IS NULL OR r_node.to >= $branch_from_time)
705
+ )
663
706
  )
664
707
  )
665
708
  )
@@ -701,13 +744,15 @@ AND [%(id_func)s(n), type(r_node)] <> [%(id_func)s(q), type(diff_rel)]
701
744
  // -------------------------------------
702
745
  WITH diff_rel_path, r_root, n, r_node, p, diff_rel
703
746
  ORDER BY r_root.from, n.uuid, p.uuid, type(diff_rel), diff_rel.branch, diff_rel.from
704
- SKIP $offset
705
- LIMIT $limit
747
+ SKIP toInteger($offset)
748
+ LIMIT toInteger($limit)
706
749
  // -------------------------------------
707
750
  // Add flag to indicate if there is more data after this
708
751
  // -------------------------------------
709
752
  WITH collect([diff_rel_path, r_root, n, r_node, p, diff_rel]) AS limited_results
710
- WITH limited_results, size(limited_results) = $limit AS has_more_data
753
+ // extra NULL row ensures that has_more_data is always returned, even if all results are filtered out below
754
+ WITH limited_results + [[NULL, NULL, NULL, NULL, NULL, NULL]] AS limited_results
755
+ WITH limited_results, size(limited_results) = ($limit + 1) AS has_more_data
711
756
  UNWIND limited_results AS one_result
712
757
  WITH one_result[0] AS diff_rel_path, one_result[1] AS r_root, one_result[2] AS n,
713
758
  one_result[3] AS r_node, one_result[4] AS p, one_result[5] AS diff_rel, has_more_data
@@ -803,8 +848,8 @@ WHERE num_nodes_with_uuid > 1
803
848
  // -------------------------------------
804
849
  WITH node_uuid
805
850
  ORDER BY node_uuid
806
- SKIP $offset
807
- LIMIT $limit
851
+ SKIP toInteger($offset)
852
+ LIMIT toInteger($limit)
808
853
  WITH collect(node_uuid) AS node_uuids
809
854
  WITH node_uuids, size(node_uuids) = $limit AS has_more_data
810
855
  MATCH (:Root)<-[diff_rel:IS_PART_OF {branch: $branch_name}]-(n:Node)
@@ -450,12 +450,23 @@ class IPPrefixReconcileQuery(Query):
450
450
  // ------------------
451
451
  CALL (ip_node) {
452
452
  OPTIONAL MATCH parent_prefix_path = (ip_node)-[r1:IS_RELATED]->(:Relationship {name: "parent__child"})-[r2:IS_RELATED]->(current_parent:%(ip_prefix_kind)s)
453
- WHERE all(r IN relationships(parent_prefix_path) WHERE (%(branch_filter)s))
453
+ WHERE $is_prefix = TRUE
454
+ AND all(r IN relationships(parent_prefix_path) WHERE (%(branch_filter)s))
454
455
  RETURN current_parent, (r1.status = "active" AND r2.status = "active") AS parent_is_active
455
456
  ORDER BY r1.branch_level DESC, r1.from DESC, r1.status ASC, r2.branch_level DESC, r2.from DESC, r2.status ASC
456
457
  LIMIT 1
457
458
  }
458
- WITH ip_namespace, ip_node, CASE WHEN parent_is_active THEN current_parent ELSE NULL END as current_parent
459
+ WITH ip_namespace, ip_node, CASE WHEN parent_is_active THEN current_parent ELSE NULL END as prefix_parent
460
+ CALL (ip_node) {
461
+ OPTIONAL MATCH parent_prefix_path = (ip_node)-[r1:IS_RELATED]->(:Relationship {name: "ip_prefix__ip_address"})<-[r2:IS_RELATED]-(current_parent:%(ip_prefix_kind)s)
462
+ WHERE $is_prefix = FALSE
463
+ AND all(r IN relationships(parent_prefix_path) WHERE (%(branch_filter)s))
464
+ RETURN current_parent, (r1.status = "active" AND r2.status = "active") AS parent_is_active
465
+ ORDER BY r1.branch_level DESC, r1.from DESC, r1.status ASC, r2.branch_level DESC, r2.from DESC, r2.status ASC
466
+ LIMIT 1
467
+ }
468
+ WITH ip_namespace, ip_node, prefix_parent, CASE WHEN parent_is_active THEN current_parent ELSE NULL END as address_parent
469
+ WITH ip_namespace, ip_node, COALESCE(prefix_parent, address_parent) AS current_parent
459
470
  """ % {
460
471
  "branch_filter": branch_filter,
461
472
  "ip_prefix_kind": InfrahubKind.IPPREFIX,
@@ -467,7 +478,7 @@ class IPPrefixReconcileQuery(Query):
467
478
  // Get prefix node's current prefix children, if any exist
468
479
  // ------------------
469
480
  CALL (ip_node) {
470
- OPTIONAL MATCH child_prefix_path = (ip_node)<-[r1:IS_RELATED]-(:Relationship {name: "parent__child"})<-[r2:IS_RELATED]-(current_prefix_child:%(ip_prefix_kind)s)
481
+ OPTIONAL MATCH child_prefix_path = (ip_node:%(ip_prefix_kind)s)<-[r1:IS_RELATED]-(:Relationship {name: "parent__child"})<-[r2:IS_RELATED]-(current_prefix_child:%(ip_prefix_kind)s)
471
482
  WHERE all(r IN relationships(child_prefix_path) WHERE (%(branch_filter)s))
472
483
  WITH current_prefix_child, (r1.status = "active" AND r2.status = "active") AS is_active
473
484
  ORDER BY current_prefix_child.uuid, r1.branch_level DESC, r1.from DESC, r2.branch_level DESC, r2.from DESC
@@ -479,7 +490,7 @@ class IPPrefixReconcileQuery(Query):
479
490
  // Get prefix node's current address children, if any exist
480
491
  // ------------------
481
492
  CALL (ip_node) {
482
- OPTIONAL MATCH child_address_path = (ip_node)-[r1:IS_RELATED]-(:Relationship {name: "ip_prefix__ip_address"})-[r2:IS_RELATED]-(current_address_child:%(ip_address_kind)s)
493
+ OPTIONAL MATCH child_address_path = (ip_node:%(ip_prefix_kind)s)-[r1:IS_RELATED]->(:Relationship {name: "ip_prefix__ip_address"})<-[r2:IS_RELATED]-(current_address_child:%(ip_address_kind)s)
483
494
  WHERE all(r IN relationships(child_address_path) WHERE (%(branch_filter)s))
484
495
  WITH current_address_child, (r1.status = "active" AND r2.status = "active") AS is_active
485
496
  ORDER BY current_address_child.uuid, r1.branch_level DESC, r1.from DESC, r2.branch_level DESC, r2.from DESC
@@ -688,6 +699,7 @@ class IPPrefixReconcileQuery(Query):
688
699
  "ip_address_attribute_kind": ADDRESS_ATTRIBUTE_LABEL,
689
700
  }
690
701
  self.add_to_query(get_new_children_query)
702
+ self.order_by = ["ip_node.uuid"]
691
703
  self.return_labels = ["ip_node", "current_parent", "current_children", "new_parent", "new_children"]
692
704
 
693
705
  def _get_uuid_from_query(self, node_name: str) -> str | None:
@@ -246,11 +246,15 @@ class NodeCreateAllQuery(NodeQuery):
246
246
  ipnetwork_prop_list = [f"{key}: {value}" for key, value in ipnetwork_prop.items()]
247
247
 
248
248
  attrs_nonindexed_query = """
249
- WITH distinct n
249
+ WITH DISTINCT n
250
250
  UNWIND $attrs AS attr
251
251
  // Try to find a matching vertex
252
- OPTIONAL MATCH (existing_av:AttributeValue {value: attr.content.value, is_default: attr.content.is_default})
253
- WHERE NOT existing_av:AttributeValueIndexed
252
+ CALL (attr) {
253
+ OPTIONAL MATCH (existing_av:AttributeValue {value: attr.content.value, is_default: attr.content.is_default})
254
+ WHERE NOT existing_av:AttributeValueIndexed
255
+ RETURN existing_av
256
+ LIMIT 1
257
+ }
254
258
  CALL (attr, existing_av) {
255
259
  // If none found, create a new one
256
260
  WITH existing_av
@@ -638,53 +642,55 @@ class NodeListGetAttributeQuery(Query):
638
642
  self.add_to_query(query)
639
643
 
640
644
  query = """
641
- CALL (n, a) {
642
- MATCH (n)-[r:HAS_ATTRIBUTE]-(a:Attribute)
643
- WHERE %(branch_filter)s
644
- RETURN r AS r1
645
- ORDER BY r.branch_level DESC, r.from DESC
646
- LIMIT 1
647
- }
648
- WITH n, r1, a, might_use_profile
649
- WHERE r1.status = "active"
650
- WITH n, r1, a, might_use_profile
651
- MATCH (a)-[r:HAS_VALUE]-(av:AttributeValue)
652
- WHERE %(branch_filter)s
653
- CALL (a, might_use_profile) {
654
- OPTIONAL MATCH (a)-[r:HAS_SOURCE]->(:CoreProfile)
655
- WHERE might_use_profile = TRUE AND %(branch_filter)s
656
- RETURN r.status = "active" AS has_active_profile
657
- ORDER BY r.branch_level DESC, r.from DESC, r.status ASC
658
- LIMIT 1
659
- }
660
- WITH *, has_active_profile = TRUE AS is_from_profile
661
- CALL (a, av) {
662
- MATCH (a)-[r:HAS_VALUE]-(av:AttributeValue)
663
- WHERE %(branch_filter)s
664
- RETURN a as a1, r as r2, av as av1
665
- ORDER BY r.branch_level DESC, r.from DESC
666
- LIMIT 1
667
- }
668
- WITH n, r1, a1 as a, r2, av1 as av, is_from_profile
669
- WHERE r2.status = "active"
670
- WITH n, a, av, r1, r2, is_from_profile
645
+ CALL (n, a) {
646
+ MATCH (n)-[r:HAS_ATTRIBUTE]-(a:Attribute)
647
+ WHERE %(branch_filter)s
648
+ RETURN r AS r1
649
+ ORDER BY r.branch_level DESC, r.from DESC
650
+ LIMIT 1
651
+ }
652
+ WITH n, r1, a, might_use_profile
653
+ WHERE r1.status = "active"
654
+ WITH n, r1, a, might_use_profile
655
+ CALL (a, might_use_profile) {
656
+ OPTIONAL MATCH (a)-[r:HAS_SOURCE]->(:CoreProfile)
657
+ WHERE might_use_profile = TRUE AND %(branch_filter)s
658
+ RETURN r.status = "active" AS has_active_profile
659
+ ORDER BY r.branch_level DESC, r.from DESC, r.status ASC
660
+ LIMIT 1
661
+ }
662
+ WITH *, has_active_profile = TRUE AS is_from_profile
663
+ CALL (a) {
664
+ MATCH (a)-[r:HAS_VALUE]-(av:AttributeValue)
665
+ WHERE %(branch_filter)s
666
+ RETURN r as r2, av
667
+ ORDER BY r.branch_level DESC, r.from DESC
668
+ LIMIT 1
669
+ }
670
+ WITH n, r1, a, r2, av, is_from_profile
671
+ WHERE r2.status = "active"
671
672
  """ % {"branch_filter": branch_filter}
672
673
  self.add_to_query(query)
673
674
 
674
675
  self.return_labels = ["n", "a", "av", "r1", "r2", "is_from_profile"]
675
676
 
676
677
  # Add Is_Protected and Is_visible
677
- rel_isv_branch_filter, _ = self.branch.get_query_filter_path(
678
- at=self.at, branch_agnostic=self.branch_agnostic, variable_name="rel_isv"
679
- )
680
- rel_isp_branch_filter, _ = self.branch.get_query_filter_path(
681
- at=self.at, branch_agnostic=self.branch_agnostic, variable_name="rel_isp"
682
- )
683
678
  query = """
684
- MATCH (a)-[rel_isv:IS_VISIBLE]-(isv:Boolean)
685
- MATCH (a)-[rel_isp:IS_PROTECTED]-(isp:Boolean)
686
- WHERE (%(rel_isv_branch_filter)s) AND (%(rel_isp_branch_filter)s)
687
- """ % {"rel_isv_branch_filter": rel_isv_branch_filter, "rel_isp_branch_filter": rel_isp_branch_filter}
679
+ CALL (a) {
680
+ MATCH (a)-[r:IS_VISIBLE]-(isv:Boolean)
681
+ WHERE (%(branch_filter)s)
682
+ RETURN r AS rel_isv, isv
683
+ ORDER BY rel_isv.branch_level DESC, rel_isv.from DESC, rel_isv.status ASC
684
+ LIMIT 1
685
+ }
686
+ CALL (a) {
687
+ MATCH (a)-[r:IS_PROTECTED]-(isp:Boolean)
688
+ WHERE (%(branch_filter)s)
689
+ RETURN r AS rel_isp, isp
690
+ ORDER BY rel_isp.branch_level DESC, rel_isp.from DESC, rel_isp.status ASC
691
+ LIMIT 1
692
+ }
693
+ """ % {"branch_filter": branch_filter}
688
694
  self.add_to_query(query)
689
695
 
690
696
  self.return_labels.extend(["isv", "isp", "rel_isv", "rel_isp"])
@@ -915,6 +921,7 @@ class NodeListGetRelationshipsQuery(Query):
915
921
  RETURN DISTINCT n_uuid, rel_name, peer_uuid, direction
916
922
  """ % {"filters": rels_filter}
917
923
  self.add_to_query(query)
924
+ self.order_by = ["n_uuid", "rel_name", "peer_uuid", "direction"]
918
925
  self.return_labels = ["n_uuid", "rel_name", "peer_uuid", "direction"]
919
926
 
920
927
  def get_peers_group_by_node(self) -> GroupedPeerNodes:
@@ -984,6 +991,7 @@ class NodeListGetInfoQuery(Query):
984
991
  )
985
992
  self.params.update(branch_params)
986
993
  self.params["ids"] = self.ids
994
+ self.order_by = ["n.uuid"]
987
995
 
988
996
  query = """
989
997
  MATCH p = (root:Root)<-[:IS_PART_OF]-(n:Node)
@@ -1036,6 +1036,7 @@ class RelationshipCountPerNodeQuery(Query):
1036
1036
  """ % {"branch_filter": branch_filter, "path": path}
1037
1037
 
1038
1038
  self.add_to_query(query)
1039
+ self.order_by = ["peer_node.uuid"]
1039
1040
  self.return_labels = ["peer_node.uuid", "COUNT(peer_node.uuid) as nbr_peers"]
1040
1041
 
1041
1042
  async def get_count_per_peer(self) -> dict[str, int]:
@@ -445,15 +445,20 @@ class Relationship(FlagPropertyMixin, NodePropertyMixin):
445
445
  )
446
446
  await delete_query.execute(db=db)
447
447
 
448
- async def resolve(self, db: InfrahubDatabase, at: Timestamp | None = None) -> None:
448
+ async def resolve(self, db: InfrahubDatabase, at: Timestamp | None = None, fields: list[str] | None = None) -> None:
449
449
  """Resolve the peer of the relationship."""
450
450
 
451
+ fields = fields or []
452
+ query_fields = dict.fromkeys(fields)
453
+ if "display_label" not in query_fields:
454
+ query_fields["display_label"] = None
455
+
451
456
  if self._peer is not None:
452
457
  return
453
458
 
454
459
  if self.peer_id and not is_valid_uuid(self.peer_id):
455
460
  peer = await registry.manager.get_one_by_default_filter(
456
- db=db, id=self.peer_id, branch=self.branch, kind=self.schema.peer, fields={"display_label": None}
461
+ db=db, id=self.peer_id, branch=self.branch, kind=self.schema.peer, fields=query_fields
457
462
  )
458
463
  if peer:
459
464
  self.set_peer(value=peer)
@@ -470,7 +475,7 @@ class Relationship(FlagPropertyMixin, NodePropertyMixin):
470
475
  hfid=self.peer_hfid,
471
476
  branch=self.branch,
472
477
  kind=kind,
473
- fields={"display_label": None},
478
+ fields=query_fields,
474
479
  raise_on_error=True,
475
480
  )
476
481
  self.set_peer(value=peer)
@@ -1142,9 +1147,9 @@ class RelationshipManager:
1142
1147
 
1143
1148
  return True
1144
1149
 
1145
- async def resolve(self, db: InfrahubDatabase) -> None:
1150
+ async def resolve(self, db: InfrahubDatabase, fields: list[str] | None = None) -> None:
1146
1151
  for rel in self._relationships:
1147
- await rel.resolve(db=db)
1152
+ await rel.resolve(db=db, fields=fields)
1148
1153
 
1149
1154
  async def remove_locally(
1150
1155
  self,
@@ -1,6 +1,7 @@
1
1
  from __future__ import annotations
2
2
 
3
3
  import uuid
4
+ from enum import Enum
4
5
  from typing import Any, TypeAlias
5
6
 
6
7
  from infrahub_sdk.utils import deep_merge_dict
@@ -44,6 +45,21 @@ class SchemaExtension(HashableModel):
44
45
  nodes: list[NodeExtensionSchema] = Field(default_factory=list)
45
46
 
46
47
 
48
+ class SchemaWarningType(Enum):
49
+ DEPRECATION = "deprecation"
50
+
51
+
52
+ class SchemaWarningKind(BaseModel):
53
+ kind: str = Field(..., description="The kind impacted by the warning")
54
+ field: str | None = Field(default=None, description="The attribute or relationship impacted by the warning")
55
+
56
+
57
+ class SchemaWarning(BaseModel):
58
+ type: SchemaWarningType = Field(..., description="The type of warning")
59
+ kinds: list[SchemaWarningKind] = Field(default_factory=list, description="The kinds impacted by the warning")
60
+ message: str = Field(..., description="The message that describes the warning")
61
+
62
+
47
63
  class SchemaRoot(BaseModel):
48
64
  model_config = ConfigDict(extra="forbid")
49
65
 
@@ -80,6 +96,46 @@ class SchemaRoot(BaseModel):
80
96
 
81
97
  return errors
82
98
 
99
+ def gather_warnings(self) -> list[SchemaWarning]:
100
+ models = self.nodes + self.generics
101
+ warnings: list[SchemaWarning] = []
102
+ for model in models:
103
+ if model.display_labels is not None:
104
+ warnings.append(
105
+ SchemaWarning(
106
+ type=SchemaWarningType.DEPRECATION,
107
+ kinds=[SchemaWarningKind(kind=model.kind)],
108
+ message="display_labels are deprecated, use display_label instead",
109
+ )
110
+ )
111
+ if model.default_filter is not None:
112
+ warnings.append(
113
+ SchemaWarning(
114
+ type=SchemaWarningType.DEPRECATION,
115
+ kinds=[SchemaWarningKind(kind=model.kind)],
116
+ message="default_filter is deprecated",
117
+ )
118
+ )
119
+ for attribute in model.attributes:
120
+ if attribute.max_length is not None:
121
+ warnings.append(
122
+ SchemaWarning(
123
+ type=SchemaWarningType.DEPRECATION,
124
+ kinds=[SchemaWarningKind(kind=model.kind, field=attribute.name)],
125
+ message="Use of 'max_length' on attributes is deprecated, use parameters instead",
126
+ )
127
+ )
128
+ if attribute.min_length is not None:
129
+ warnings.append(
130
+ SchemaWarning(
131
+ type=SchemaWarningType.DEPRECATION,
132
+ kinds=[SchemaWarningKind(kind=model.kind, field=attribute.name)],
133
+ message="Use of 'min_length' on attributes is deprecated, use parameters instead",
134
+ )
135
+ )
136
+
137
+ return warnings
138
+
83
139
  def generate_uuid(self) -> None:
84
140
  """Generate UUID for all nodes, attributes & relationships
85
141
  Mainly useful during unit tests."""
@@ -68,6 +68,10 @@ class AttributeSchema(GeneratedAttributeSchema):
68
68
  def is_deprecated(self) -> bool:
69
69
  return bool(self.deprecation)
70
70
 
71
+ @property
72
+ def support_profiles(self) -> bool:
73
+ return self.read_only is False and self.optional is True
74
+
71
75
  def get_id(self) -> str:
72
76
  if self.id is None:
73
77
  raise InitializationError("The attribute schema has not been saved yet and doesn't have an id")
@@ -29,7 +29,7 @@ core_check_definition = NodeSchema(
29
29
  Attr(name="description", kind="Text", optional=True),
30
30
  Attr(name="file_path", kind="Text"),
31
31
  Attr(name="class_name", kind="Text"),
32
- Attr(name="timeout", kind="Number", default_value=10),
32
+ Attr(name="timeout", kind="Number", default_value=60),
33
33
  Attr(name="parameters", kind="JSON", optional=True),
34
34
  ],
35
35
  relationships=[
@@ -29,7 +29,7 @@ core_transform = GenericSchema(
29
29
  Attr(name="name", kind="Text", unique=True),
30
30
  Attr(name="label", kind="Text", optional=True),
31
31
  Attr(name="description", kind="Text", optional=True),
32
- Attr(name="timeout", kind="Number", default_value=10),
32
+ Attr(name="timeout", kind="Number", default_value=60),
33
33
  ],
34
34
  relationships=[
35
35
  Rel(
@@ -568,7 +568,7 @@ attribute_schema = SchemaNode(
568
568
  "Mainly relevant for internal object.",
569
569
  default_value=False,
570
570
  optional=True,
571
- extra={"update": UpdateSupport.ALLOWED},
571
+ extra={"update": UpdateSupport.MIGRATION_REQUIRED},
572
572
  ),
573
573
  SchemaAttribute(
574
574
  name="unique",
@@ -585,7 +585,7 @@ attribute_schema = SchemaNode(
585
585
  default_value=False,
586
586
  override_default_value=False,
587
587
  optional=True,
588
- extra={"update": UpdateSupport.VALIDATE_CONSTRAINT},
588
+ extra={"update": UpdateSupport.MIGRATION_REQUIRED},
589
589
  ),
590
590
  SchemaAttribute(
591
591
  name="branch",
@@ -78,7 +78,7 @@ class GeneratedAttributeSchema(HashableModel):
78
78
  read_only: bool = Field(
79
79
  default=False,
80
80
  description="Set the attribute as Read-Only, users won't be able to change its value. Mainly relevant for internal object.",
81
- json_schema_extra={"update": "allowed"},
81
+ json_schema_extra={"update": "migration_required"},
82
82
  )
83
83
  unique: bool = Field(
84
84
  default=False,
@@ -88,7 +88,7 @@ class GeneratedAttributeSchema(HashableModel):
88
88
  optional: bool = Field(
89
89
  default=False,
90
90
  description="Indicate if this attribute is mandatory or optional.",
91
- json_schema_extra={"update": "validate_constraint"},
91
+ json_schema_extra={"update": "migration_required"},
92
92
  )
93
93
  branch: BranchSupportType | None = Field(
94
94
  default=None,
@@ -2,6 +2,9 @@ from __future__ import annotations
2
2
 
3
3
  from typing import TYPE_CHECKING, Any
4
4
 
5
+ from cachetools import LRUCache
6
+ from infrahub_sdk.schema import BranchSchema as SDKBranchSchema
7
+
5
8
  from infrahub import lock
6
9
  from infrahub.core.manager import NodeManager
7
10
  from infrahub.core.models import (
@@ -40,6 +43,8 @@ class SchemaManager(NodeManager):
40
43
  def __init__(self) -> None:
41
44
  self._cache: dict[int, Any] = {}
42
45
  self._branches: dict[str, SchemaBranch] = {}
46
+ self._branch_hash_by_name: dict[str, str] = {}
47
+ self._sdk_branches: LRUCache[str, SDKBranchSchema] = LRUCache(maxsize=10)
43
48
 
44
49
  def _get_from_cache(self, key: int) -> Any:
45
50
  return self._cache[key]
@@ -140,12 +145,26 @@ class SchemaManager(NodeManager):
140
145
  if name in self._branches:
141
146
  return self._branches[name]
142
147
 
143
- self._branches[name] = SchemaBranch(cache=self._cache, name=name)
148
+ self.set_schema_branch(name, schema=SchemaBranch(cache=self._cache, name=name))
144
149
  return self._branches[name]
145
150
 
151
+ def get_sdk_schema_branch(self, name: str) -> SDKBranchSchema:
152
+ schema_hash = self._branch_hash_by_name[name]
153
+ branch_schema = self._sdk_branches.get(schema_hash)
154
+ if not branch_schema:
155
+ self._sdk_branches[schema_hash] = SDKBranchSchema.from_api_response(
156
+ data=self._branches[name].to_dict_api_schema_object()
157
+ )
158
+
159
+ return self._sdk_branches[schema_hash]
160
+
146
161
  def set_schema_branch(self, name: str, schema: SchemaBranch) -> None:
147
162
  schema.name = name
148
163
  self._branches[name] = schema
164
+ self._branch_hash_by_name[name] = schema.get_hash()
165
+
166
+ def has_schema_branch(self, name: str) -> bool:
167
+ return name in self._branches
149
168
 
150
169
  def process_schema_branch(self, name: str) -> None:
151
170
  schema_branch = self.get_schema_branch(name=name)
@@ -764,6 +783,8 @@ class SchemaManager(NodeManager):
764
783
  for branch_name in list(self._branches.keys()):
765
784
  if branch_name not in active_branches:
766
785
  del self._branches[branch_name]
786
+ if branch_name in self._branch_hash_by_name:
787
+ del self._branch_hash_by_name[branch_name]
767
788
  removed_branches.append(branch_name)
768
789
 
769
790
  for hash_key in list(self._cache.keys()):