infrahub-server 1.3.0b1__py3-none-any.whl → 1.3.0b3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81)
  1. infrahub/actions/constants.py +87 -0
  2. infrahub/actions/gather.py +3 -3
  3. infrahub/actions/models.py +10 -8
  4. infrahub/actions/parsers.py +6 -6
  5. infrahub/actions/schema.py +46 -37
  6. infrahub/actions/tasks.py +4 -11
  7. infrahub/branch/__init__.py +0 -0
  8. infrahub/branch/tasks.py +29 -0
  9. infrahub/branch/triggers.py +22 -0
  10. infrahub/cli/db.py +2 -2
  11. infrahub/computed_attribute/gather.py +3 -1
  12. infrahub/computed_attribute/tasks.py +23 -29
  13. infrahub/core/constants/__init__.py +5 -0
  14. infrahub/core/constants/database.py +1 -0
  15. infrahub/core/convert_object_type/conversion.py +1 -1
  16. infrahub/core/diff/query/save.py +67 -40
  17. infrahub/core/diff/query/time_range_query.py +0 -1
  18. infrahub/core/graph/__init__.py +1 -1
  19. infrahub/core/migrations/graph/__init__.py +6 -0
  20. infrahub/core/migrations/graph/m013_convert_git_password_credential.py +0 -2
  21. infrahub/core/migrations/graph/m029_duplicates_cleanup.py +662 -0
  22. infrahub/core/migrations/graph/m030_illegal_edges.py +82 -0
  23. infrahub/core/migrations/query/attribute_add.py +13 -9
  24. infrahub/core/migrations/query/relationship_duplicate.py +0 -1
  25. infrahub/core/migrations/schema/node_remove.py +0 -1
  26. infrahub/core/node/__init__.py +2 -0
  27. infrahub/core/node/base.py +1 -1
  28. infrahub/core/path.py +1 -1
  29. infrahub/core/protocols.py +4 -3
  30. infrahub/core/query/node.py +1 -1
  31. infrahub/core/query/relationship.py +2 -2
  32. infrahub/core/query/standard_node.py +19 -5
  33. infrahub/core/relationship/constraints/peer_relatives.py +72 -0
  34. infrahub/core/relationship/model.py +1 -1
  35. infrahub/core/schema/attribute_schema.py +26 -6
  36. infrahub/core/schema/basenode_schema.py +2 -2
  37. infrahub/core/schema/definitions/core/resource_pool.py +9 -0
  38. infrahub/core/schema/definitions/internal.py +9 -1
  39. infrahub/core/schema/generated/attribute_schema.py +4 -4
  40. infrahub/core/schema/generated/relationship_schema.py +6 -1
  41. infrahub/core/schema/manager.py +4 -2
  42. infrahub/core/schema/schema_branch.py +14 -5
  43. infrahub/core/validators/tasks.py +1 -1
  44. infrahub/database/__init__.py +1 -1
  45. infrahub/database/validation.py +100 -0
  46. infrahub/dependencies/builder/constraint/grouped/node_runner.py +2 -0
  47. infrahub/dependencies/builder/constraint/relationship_manager/peer_relatives.py +8 -0
  48. infrahub/dependencies/builder/diff/deserializer.py +1 -1
  49. infrahub/dependencies/registry.py +2 -0
  50. infrahub/events/models.py +1 -1
  51. infrahub/graphql/mutations/main.py +1 -1
  52. infrahub/graphql/mutations/resource_manager.py +13 -13
  53. infrahub/graphql/resolvers/many_relationship.py +1 -1
  54. infrahub/graphql/resolvers/resolver.py +2 -2
  55. infrahub/graphql/resolvers/single_relationship.py +1 -1
  56. infrahub/menu/menu.py +5 -4
  57. infrahub/message_bus/operations/refresh/registry.py +3 -3
  58. infrahub/patch/queries/delete_duplicated_edges.py +40 -29
  59. infrahub/pools/registration.py +22 -0
  60. infrahub/pools/tasks.py +56 -0
  61. infrahub/proposed_change/tasks.py +8 -8
  62. infrahub/schema/__init__.py +0 -0
  63. infrahub/schema/tasks.py +27 -0
  64. infrahub/schema/triggers.py +23 -0
  65. infrahub/trigger/catalogue.py +4 -0
  66. infrahub/trigger/models.py +5 -4
  67. infrahub/trigger/setup.py +26 -2
  68. infrahub/trigger/tasks.py +1 -1
  69. infrahub/webhook/tasks.py +6 -9
  70. infrahub/workflows/catalogue.py +27 -1
  71. {infrahub_server-1.3.0b1.dist-info → infrahub_server-1.3.0b3.dist-info}/METADATA +1 -1
  72. {infrahub_server-1.3.0b1.dist-info → infrahub_server-1.3.0b3.dist-info}/RECORD +80 -67
  73. infrahub_testcontainers/container.py +239 -64
  74. infrahub_testcontainers/docker-compose-cluster.test.yml +321 -0
  75. infrahub_testcontainers/docker-compose.test.yml +1 -0
  76. infrahub_testcontainers/helpers.py +15 -1
  77. infrahub_testcontainers/plugin.py +9 -0
  78. infrahub/patch/queries/consolidate_duplicated_nodes.py +0 -106
  79. {infrahub_server-1.3.0b1.dist-info → infrahub_server-1.3.0b3.dist-info}/LICENSE.txt +0 -0
  80. {infrahub_server-1.3.0b1.dist-info → infrahub_server-1.3.0b3.dist-info}/WHEEL +0 -0
  81. {infrahub_server-1.3.0b1.dist-info → infrahub_server-1.3.0b3.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,100 @@
1
+ from infrahub.database import InfrahubDatabase
2
+
3
+
4
async def verify_no_duplicate_relationships(db: InfrahubDatabase) -> None:
    """
    Verify that no duplicate active relationships exist at the database level

    A duplicate is defined as
    - connecting the same two nodes
    - having the same identifier
    - having the same direction (inbound, outbound, bidirectional)
    - having the same branch
    A more thorough check that no duplicates exist at any point in time is possible, but more complex

    Raises:
        ValueError: if any duplicate relationship groups exist. All groups are collected and
            reported at once (consistent with verify_no_edges_added_after_node_delete) instead
            of raising on the first one found.
    """
    query = """
    MATCH (a:Node)-[e1:IS_RELATED {status: "active"}]-(rel:Relationship)-[e2:IS_RELATED {branch: e1.branch, status: "active"}]-(b:Node)
    WHERE a.uuid <> b.uuid
    AND e1.to IS NULL
    AND e2.to IS NULL
    WITH a, rel.name AS rel_name, b, e1.branch AS branch, CASE
        WHEN startNode(e1) = a AND startNode(e2) = rel THEN "out"
        WHEN startNode(e1) = rel AND startNode(e2) = b THEN "in"
        ELSE "bidir"
    END AS direction, COUNT(*) AS num_duplicates
    WHERE num_duplicates > 1
    RETURN a.uuid AS node_id1, b.uuid AS node_id2, rel_name, branch, direction, num_duplicates
    """
    results = await db.execute_query(query=query)
    error_messages = []
    for result in results:
        node_id1 = result.get("node_id1")
        node_id2 = result.get("node_id2")
        rel_name = result.get("rel_name")
        branch = result.get("branch")
        direction = result.get("direction")
        num_duplicates = result.get("num_duplicates")
        # Collect every violation so the caller sees the full picture in one failure
        error_messages.append(
            f"{num_duplicates} duplicate relationships ({branch=},{direction=}) between nodes '{node_id1}' and '{node_id2}'"
            f" with relationship name '{rel_name}'"
        )
    if error_messages:
        raise ValueError(error_messages)
39
+
40
+
41
async def verify_no_edges_added_after_node_delete(db: InfrahubDatabase) -> None:
    """
    Verify that no edges are added to a Node after it is deleted on a given branch

    Raises:
        ValueError: with one message per illegal edge found.
    """
    # NOTE: the final WITH must carry `added_e_branched_from` forward, otherwise the
    # following WHERE clause references an undefined variable and the query fails.
    query = """
    // ------------
    // find deleted nodes
    // ------------
    MATCH (n:Node)-[e:IS_PART_OF]->(:Root)
    WHERE e.status = "deleted" OR e.to IS NOT NULL
    WITH DISTINCT n, e.branch AS delete_branch, e.branch_level AS delete_branch_level, CASE
        WHEN e.status = "deleted" THEN e.from
        ELSE e.to
    END AS delete_time
    // ------------
    // find the edges added to the deleted node after the delete time
    // ------------
    MATCH (n)-[added_e]-(peer)
    WHERE added_e.from > delete_time
    AND type(added_e) <> "IS_PART_OF"
    // if the node was deleted on a branch (delete_branch_level > 1), and then updated on main/global (added_e.branch_level = 1), we can ignore it
    AND added_e.branch_level >= delete_branch_level
    AND (added_e.branch = delete_branch OR delete_branch_level = 1)
    WITH DISTINCT n, delete_branch, delete_time, added_e, peer AS added_peer
    // ------------
    // get the branched_from for the branch on which the node was deleted
    // ------------
    CALL (added_e) {
        MATCH (b:Branch {name: added_e.branch})
        RETURN b.branched_from AS added_e_branched_from
    }
    // ------------
    // account for the following situations, given that the edge update time is after the node delete time
    // - deleted on main/global, updated on branch
    //   - illegal if the delete is before branch.branched_from
    // - deleted on branch, updated on branch
    //   - illegal
    // ------------
    WITH n, delete_branch, delete_time, added_e, added_peer, added_e_branched_from
    WHERE delete_branch = added_e.branch
    OR delete_time < added_e_branched_from
    RETURN n.uuid AS n_uuid, delete_branch, delete_time, added_e, added_peer
    """
    results = await db.execute_query(query=query)
    error_messages = []
    for result in results:
        n_uuid = result.get("n_uuid")
        delete_branch = result.get("delete_branch")
        delete_time = result.get("delete_time")
        added_e = result.get("added_e")
        added_e_branch = added_e.get("branch")
        added_e_from = added_e.get("from")
        added_peer = result.get("added_peer")
        message = (
            f"Node {n_uuid} was deleted on {delete_branch} at {delete_time} but has an {added_e.type} edge added on"
            f" branch {added_e_branch} at {added_e_from} to {added_peer.element_id}"
        )
        error_messages.append(message)
    if error_messages:
        raise ValueError(error_messages)
@@ -4,6 +4,7 @@ from infrahub.dependencies.interface import DependencyBuilder, DependencyBuilder
4
4
  from ..node.grouped_uniqueness import NodeGroupedUniquenessConstraintDependency
5
5
  from ..relationship_manager.count import RelationshipCountConstraintDependency
6
6
  from ..relationship_manager.peer_kind import RelationshipPeerKindConstraintDependency
7
+ from ..relationship_manager.peer_relatives import RelationshipPeerRelativesConstraintDependency
7
8
  from ..relationship_manager.profiles_kind import RelationshipProfilesKindConstraintDependency
8
9
 
9
10
 
@@ -18,5 +19,6 @@ class NodeConstraintRunnerDependency(DependencyBuilder[NodeConstraintRunner]):
18
19
  RelationshipPeerKindConstraintDependency.build(context=context),
19
20
  RelationshipCountConstraintDependency.build(context=context),
20
21
  RelationshipProfilesKindConstraintDependency.build(context=context),
22
+ RelationshipPeerRelativesConstraintDependency.build(context=context),
21
23
  ],
22
24
  )
@@ -0,0 +1,8 @@
1
+ from infrahub.core.relationship.constraints.peer_relatives import RelationshipPeerRelativesConstraint
2
+ from infrahub.dependencies.interface import DependencyBuilder, DependencyBuilderContext
3
+
4
+
5
+ class RelationshipPeerRelativesConstraintDependency(DependencyBuilder[RelationshipPeerRelativesConstraint]):
6
+ @classmethod
7
+ def build(cls, context: DependencyBuilderContext) -> RelationshipPeerRelativesConstraint:
8
+ return RelationshipPeerRelativesConstraint(db=context.db, branch=context.branch)
@@ -6,5 +6,5 @@ from .parent_node_adder import DiffParentNodeAdderDependency
6
6
 
7
7
  class DiffDeserializerDependency(DependencyBuilder[EnrichedDiffDeserializer]):
8
8
  @classmethod
9
- def build(cls, context: DependencyBuilderContext) -> EnrichedDiffDeserializer: # noqa: ARG003
9
+ def build(cls, context: DependencyBuilderContext) -> EnrichedDiffDeserializer:
10
10
  return EnrichedDiffDeserializer(parent_adder=DiffParentNodeAdderDependency.build(context=context))
@@ -3,6 +3,7 @@ from .builder.constraint.node.grouped_uniqueness import NodeGroupedUniquenessCon
3
3
  from .builder.constraint.node.uniqueness import NodeAttributeUniquenessConstraintDependency
4
4
  from .builder.constraint.relationship_manager.count import RelationshipCountConstraintDependency
5
5
  from .builder.constraint.relationship_manager.peer_kind import RelationshipPeerKindConstraintDependency
6
+ from .builder.constraint.relationship_manager.peer_relatives import RelationshipPeerRelativesConstraintDependency
6
7
  from .builder.constraint.relationship_manager.profiles_kind import RelationshipProfilesKindConstraintDependency
7
8
  from .builder.constraint.schema.aggregated import AggregatedSchemaConstraintsDependency
8
9
  from .builder.constraint.schema.attribute_regex import SchemaAttributeRegexConstraintDependency
@@ -37,6 +38,7 @@ def build_component_registry() -> ComponentDependencyRegistry:
37
38
  component_registry.track_dependency(RelationshipCountConstraintDependency)
38
39
  component_registry.track_dependency(RelationshipProfilesKindConstraintDependency)
39
40
  component_registry.track_dependency(RelationshipPeerKindConstraintDependency)
41
+ component_registry.track_dependency(RelationshipPeerRelativesConstraintDependency)
40
42
  component_registry.track_dependency(NodeConstraintRunnerDependency)
41
43
  component_registry.track_dependency(NodeDeleteValidatorDependency)
42
44
  component_registry.track_dependency(IpamKindsGetterDependency)
infrahub/events/models.py CHANGED
@@ -8,7 +8,7 @@ from pydantic import BaseModel, Field, PrivateAttr, model_validator
8
8
 
9
9
  from infrahub import __version__
10
10
  from infrahub.auth import AccountSession, AuthType
11
- from infrahub.context import InfrahubContext # noqa: TC001
11
+ from infrahub.context import InfrahubContext
12
12
  from infrahub.core.branch import Branch # noqa: TC001
13
13
  from infrahub.message_bus import InfrahubMessage, Meta
14
14
  from infrahub.worker import WORKER_IDENTITY
@@ -382,7 +382,7 @@ class InfrahubMutationMixin:
382
382
  except HFIDViolatedError as exc:
383
383
  # Only the HFID constraint has been violated, it means the node exists and we can update without rerunning constraints
384
384
  if len(exc.matching_nodes_ids) > 1:
385
- raise RuntimeError(f"Multiple {schema_name} nodes have the same hfid (database corrupted)") from exc
385
+ raise RuntimeError(f"Multiple {schema_name} nodes have the same hfid") from exc
386
386
  node_id = list(exc.matching_nodes_ids)[0]
387
387
  node = await NodeManager.get_one(db=db, id=node_id, kind=schema_name, branch=branch, raise_on_error=True)
388
388
  updated_obj, mutation = await cls._call_mutate_update(
@@ -7,13 +7,14 @@ from graphene.types.generic import GenericScalar
7
7
  from typing_extensions import Self
8
8
 
9
9
  from infrahub.core import protocols, registry
10
- from infrahub.core.constants import InfrahubKind
10
+ from infrahub.core.constants import InfrahubKind, NumberPoolType
11
11
  from infrahub.core.ipam.constants import PrefixMemberType
12
12
  from infrahub.core.manager import NodeManager
13
13
  from infrahub.core.schema import NodeSchema
14
14
  from infrahub.core.schema.attribute_parameters import NumberAttributeParameters
15
15
  from infrahub.database import retry_db_transaction
16
16
  from infrahub.exceptions import QueryValidationError, SchemaNotFoundError, ValidationError
17
+ from infrahub.pools.registration import get_branches_with_schema_number_pool
17
18
 
18
19
  from ..queries.resource_manager import PoolAllocatedNode
19
20
  from .main import DeleteResult, InfrahubMutationMixin, InfrahubMutationOptions
@@ -234,6 +235,14 @@ class InfrahubNumberPoolMutation(InfrahubMutationMixin, Mutation):
234
235
  number_pool, result = await super().mutate_update(
235
236
  info=info, data=data, branch=branch, database=dbt, node=node
236
237
  )
238
+
239
+ if number_pool.pool_type.value.value == NumberPoolType.SCHEMA.value and ( # type: ignore[attr-defined]
240
+ "start_range" in data.keys() or "end_range" in data.keys()
241
+ ):
242
+ raise ValidationError(
243
+ input_value="start_range or end_range can't be updated on schema defined pools, update the schema in the default branch instead"
244
+ )
245
+
237
246
  if number_pool.start_range.value > number_pool.end_range.value: # type: ignore[attr-defined]
238
247
  raise ValidationError(input_value="start_range can't be larger than end_range")
239
248
 
@@ -257,18 +266,9 @@ class InfrahubNumberPoolMutation(InfrahubMutationMixin, Mutation):
257
266
  branch=branch,
258
267
  )
259
268
 
260
- active_branches = registry.schema.get_branches()
261
- violating_branches = []
262
- for active_branch in active_branches:
263
- try:
264
- schema = registry.schema.get(name=number_pool.node.value, branch=active_branch)
265
- except SchemaNotFoundError:
266
- continue
267
-
268
- if number_pool.node_attribute.value in schema.attribute_names:
269
- attribute = schema.get_attribute(name=number_pool.node_attribute.value)
270
- if attribute.kind == "NumberPool":
271
- violating_branches.append(active_branch)
269
+ violating_branches = get_branches_with_schema_number_pool(
270
+ kind=number_pool.node.value, attribute_name=number_pool.node_attribute.value
271
+ )
272
272
 
273
273
  if violating_branches:
274
274
  raise ValidationError(
@@ -99,7 +99,7 @@ class ManyRelationshipResolver:
99
99
  filters = {
100
100
  f"{info.field_name}__{key}": value
101
101
  for key, value in kwargs.items()
102
- if "__" in key and value or key in ["id", "ids"]
102
+ if ("__" in key and value) or key in ["id", "ids"]
103
103
  }
104
104
 
105
105
  response: dict[str, Any] = {"edges": [], "count": None}
@@ -99,7 +99,7 @@ async def default_resolver(*args: Any, **kwargs) -> dict | list[dict] | None:
99
99
  filters = {
100
100
  f"{info.field_name}__{key}": value
101
101
  for key, value in kwargs.items()
102
- if "__" in key and value or key in ["id", "ids"]
102
+ if ("__" in key and value) or key in ["id", "ids"]
103
103
  }
104
104
 
105
105
  async with graphql_context.db.start_session(read_only=True) as db:
@@ -288,7 +288,7 @@ async def hierarchy_resolver(
288
288
  filters = {
289
289
  f"{info.field_name}__{key}": value
290
290
  for key, value in kwargs.items()
291
- if "__" in key and value or key in ["id", "ids"]
291
+ if ("__" in key and value) or key in ["id", "ids"]
292
292
  }
293
293
 
294
294
  response: dict[str, Any] = {"edges": [], "count": None}
@@ -107,7 +107,7 @@ class SingleRelationshipResolver:
107
107
  filters = {
108
108
  f"{field_name}__{key}": value
109
109
  for key, value in kwargs.items()
110
- if "__" in key and value or key in ["id", "ids"]
110
+ if ("__" in key and value) or key in ["id", "ids"]
111
111
  }
112
112
  async with db.start_session(read_only=True) as dbs:
113
113
  objs = await NodeManager.query_peers(
infrahub/menu/menu.py CHANGED
@@ -57,7 +57,7 @@ default_menu = [
57
57
  name="IPPrefix",
58
58
  label="IP Prefixes",
59
59
  kind=InfrahubKind.IPPREFIX,
60
- path="/ipam/prefixes",
60
+ path="/ipam",
61
61
  icon=_extract_node_icon(infrahub_schema.get(InfrahubKind.IPPREFIX)),
62
62
  protected=True,
63
63
  section=MenuSection.INTERNAL,
@@ -68,7 +68,7 @@ default_menu = [
68
68
  name="IPAddress",
69
69
  label="IP Addresses",
70
70
  kind=InfrahubKind.IPPREFIX,
71
- path="/ipam/addresses?ipam-tab=ip-details",
71
+ path="/ipam/ip_addresses",
72
72
  icon=_extract_node_icon(infrahub_schema.get(InfrahubKind.IPADDRESS)),
73
73
  protected=True,
74
74
  section=MenuSection.INTERNAL,
@@ -79,6 +79,7 @@ default_menu = [
79
79
  name="Namespaces",
80
80
  label="Namespaces",
81
81
  kind=InfrahubKind.IPNAMESPACE,
82
+ path="/ipam/namespaces",
82
83
  icon=_extract_node_icon(infrahub_schema.get(InfrahubKind.IPNAMESPACE)),
83
84
  protected=True,
84
85
  section=MenuSection.INTERNAL,
@@ -256,7 +257,7 @@ default_menu = [
256
257
  order_weight=6000,
257
258
  children=[
258
259
  MenuItemDefinition(
259
- namespace="Core",
260
+ namespace="Builtin",
260
261
  name="TriggerRule",
261
262
  label="Rules",
262
263
  kind=InfrahubKind.TRIGGERRULE,
@@ -266,7 +267,7 @@ default_menu = [
266
267
  order_weight=1000,
267
268
  ),
268
269
  MenuItemDefinition(
269
- namespace="Core",
270
+ namespace="Builtin",
270
271
  name="Action",
271
272
  label="Actions",
272
273
  kind=InfrahubKind.ACTION,
@@ -26,6 +26,6 @@ async def rebased_branch(message: messages.RefreshRegistryRebasedBranch, service
26
26
 
27
27
  async with lock.registry.local_schema_lock():
28
28
  service.log.info("Refreshing rebased branch")
29
- registry.branch[message.branch] = await registry.branch_object.get_by_name(
30
- name=message.branch, db=service.database
31
- )
29
+
30
+ async with service.database.start_session(read_only=True) as db:
31
+ registry.branch[message.branch] = await registry.branch_object.get_by_name(name=message.branch, db=db)
@@ -4,7 +4,7 @@ from .base import PatchQuery
4
4
 
5
5
  class DeleteDuplicatedEdgesPatchQuery(PatchQuery):
6
6
  """
7
- Find duplicated or overlapping edges of the same status, type, and branch to update and delete
7
+ For all Node vertices, find duplicated or overlapping edges of the same status, type, and branch to update and delete
8
8
  - one edge will be kept for each pair of nodes and a given status, type, and branch. it will be
9
9
  updated to have the earliest "from" and "to" times in this group
10
10
  - all the other duplicate/overlapping edges will be deleted
@@ -17,9 +17,9 @@ class DeleteDuplicatedEdgesPatchQuery(PatchQuery):
17
17
  async def plan(self) -> PatchPlan:
18
18
  query = """
19
19
  // ------------
20
- // Find node pairs that have duplicate edges
20
+ // Find vertex pairs that have duplicate edges
21
21
  // ------------
22
- MATCH (node_with_dup_edges:Node)-[edge]->(peer)
22
+ MATCH (node_with_dup_edges:Node)-[edge]-(peer)
23
23
  WITH node_with_dup_edges, type(edge) AS edge_type, edge.status AS edge_status, edge.branch AS edge_branch, peer, count(*) AS num_dup_edges
24
24
  WHERE num_dup_edges > 1
25
25
  WITH DISTINCT node_with_dup_edges, edge_type, edge_branch, peer
@@ -27,12 +27,12 @@ CALL (node_with_dup_edges, edge_type, edge_branch, peer) {
27
27
  // ------------
28
28
  // Get the earliest active and deleted edges for this branch
29
29
  // ------------
30
- MATCH (node_with_dup_edges)-[active_edge {branch: edge_branch, status: "active"}]->(peer)
30
+ OPTIONAL MATCH (node_with_dup_edges)-[active_edge {branch: edge_branch, status: "active"}]->(peer)
31
31
  WHERE type(active_edge) = edge_type
32
32
  WITH node_with_dup_edges, edge_type, edge_branch, peer, active_edge
33
33
  ORDER BY active_edge.from ASC
34
34
  WITH node_with_dup_edges, edge_type, edge_branch, peer, head(collect(active_edge.from)) AS active_from
35
- OPTIONAL MATCH (node_with_dup_edges)-[deleted_edge {branch: edge_branch, status: "deleted"}]->(peer)
35
+ OPTIONAL MATCH (node_with_dup_edges)-[deleted_edge {branch: edge_branch, status: "deleted"}]-(peer)
36
36
  WITH node_with_dup_edges, edge_type, edge_branch, peer, active_from, deleted_edge
37
37
  ORDER BY deleted_edge.from ASC
38
38
  WITH node_with_dup_edges, edge_type, edge_branch, peer, active_from, head(collect(deleted_edge.from)) AS deleted_from
@@ -40,71 +40,82 @@ CALL (node_with_dup_edges, edge_type, edge_branch, peer) {
40
40
  // Plan one active edge update with correct from and to times
41
41
  // ------------
42
42
  CALL (node_with_dup_edges, edge_type, edge_branch, peer, active_from, deleted_from) {
43
- MATCH (node_with_dup_edges)-[active_e {branch: edge_branch, status: "active"}]->(peer)
43
+ OPTIONAL MATCH (node_with_dup_edges)-[active_e {branch: edge_branch, status: "active"}]->(peer)
44
44
  WHERE type(active_e) = edge_type
45
45
  WITH node_with_dup_edges, edge_type, edge_branch, peer, active_from, deleted_from, active_e
46
46
  ORDER BY %(id_func_name)s(active_e)
47
47
  LIMIT 1
48
48
  WITH active_e, properties(active_e) AS before_props, {from: active_from, to: deleted_from} AS prop_updates
49
- RETURN [
50
- {
51
- db_id: %(id_func_name)s(active_e), before_props: before_props, prop_updates: prop_updates
52
- }
53
- ] AS active_edges_to_update
49
+ RETURN CASE
50
+ WHEN active_e IS NOT NULL THEN [
51
+ {
52
+ db_id: %(id_func_name)s(active_e), before_props: before_props, prop_updates: prop_updates
53
+ }
54
+ ]
55
+ ELSE []
56
+ END AS active_edges_to_update
54
57
  }
55
58
  // ------------
56
59
  // Plan deletes for all the other active edges of this type on this branch
57
60
  // ------------
58
61
  CALL (node_with_dup_edges, edge_type, edge_branch, peer) {
59
- MATCH (node_with_dup_edges)-[active_e {branch: edge_branch, status: "active"}]->(peer)
62
+ OPTIONAL MATCH (node_with_dup_edges)-[active_e {branch: edge_branch, status: "active"}]->(peer)
60
63
  WHERE type(active_e) = edge_type
61
64
  WITH node_with_dup_edges, peer, active_e
62
65
  ORDER BY %(id_func_name)s(active_e)
63
66
  SKIP 1
64
- RETURN collect(
65
- {
67
+ WITH CASE
68
+ WHEN active_e IS NOT NULL THEN {
66
69
  db_id: %(id_func_name)s(active_e),
67
- from_id: %(id_func_name)s(node_with_dup_edges),
68
- to_id: %(id_func_name)s(peer),
70
+ from_id: %(id_func_name)s(startNode(active_e)),
71
+ to_id: %(id_func_name)s(endNode(active_e)),
69
72
  edge_type: type(active_e),
70
73
  before_props: properties(active_e)
71
74
  }
72
- ) AS active_edges_to_delete
75
+ ELSE NULL
76
+ END AS serialized_edge
77
+ RETURN collect(serialized_edge) AS active_edges_to_delete
73
78
  }
74
79
  // ------------
75
80
  // Plan one deleted edge update with correct from time
76
81
  // ------------
77
82
  CALL (node_with_dup_edges, edge_type, edge_branch, peer, deleted_from) {
78
- MATCH (node_with_dup_edges)-[deleted_e {branch: edge_branch, status: "deleted"}]->(peer)
83
+ OPTIONAL MATCH (node_with_dup_edges)-[deleted_e {branch: edge_branch, status: "deleted"}]->(peer)
79
84
  WHERE type(deleted_e) = edge_type
80
85
  WITH node_with_dup_edges, edge_type, edge_branch, peer, deleted_from, deleted_e
81
86
  ORDER BY %(id_func_name)s(deleted_e)
82
87
  LIMIT 1
83
88
  WITH deleted_e, properties(deleted_e) AS before_props, {from: deleted_from} AS prop_updates
84
- RETURN [
85
- {
86
- db_id: %(id_func_name)s(deleted_e), before_props: before_props, prop_updates: prop_updates
87
- }
88
- ] AS deleted_edges_to_update
89
+ RETURN CASE
90
+ WHEN deleted_e IS NOT NULL THEN [
91
+ {
92
+ db_id: %(id_func_name)s(deleted_e), before_props: before_props, prop_updates: prop_updates
93
+ }
94
+ ]
95
+ ELSE []
96
+ END AS deleted_edges_to_update
89
97
  }
90
98
  // ------------
91
99
  // Plan deletes for all the other deleted edges of this type on this branch
92
100
  // ------------
93
101
  CALL (node_with_dup_edges, edge_type, edge_branch, peer) {
94
- MATCH (node_with_dup_edges)-[deleted_e {branch: edge_branch, status: "deleted"}]->(peer)
102
+ OPTIONAL MATCH (node_with_dup_edges)-[deleted_e {branch: edge_branch, status: "deleted"}]->(peer)
95
103
  WHERE type(deleted_e) = edge_type
96
104
  WITH node_with_dup_edges, peer, deleted_e
97
105
  ORDER BY %(id_func_name)s(deleted_e)
98
106
  SKIP 1
99
- RETURN collect(
100
- {
107
+ WITH CASE
108
+ WHEN deleted_e IS NOT NULL THEN {
101
109
  db_id: %(id_func_name)s(deleted_e),
102
- from_id: %(id_func_name)s(node_with_dup_edges),
103
- to_id: %(id_func_name)s(peer),
110
+ from_id: %(id_func_name)s(startNode(deleted_e)),
111
+ to_id: %(id_func_name)s(endNode(deleted_e)),
104
112
  edge_type: type(deleted_e),
105
113
  before_props: properties(deleted_e)
106
114
  }
107
- ) AS deleted_edges_to_delete
115
+ ELSE NULL
116
+ END AS serialized_edge
117
+
118
+ RETURN collect(serialized_edge) AS deleted_edges_to_delete
108
119
  }
109
120
  RETURN
110
121
  active_edges_to_update + deleted_edges_to_update AS edges_to_update,
@@ -0,0 +1,22 @@
1
+ from infrahub.core.registry import registry
2
+ from infrahub.exceptions import SchemaNotFoundError
3
+
4
+
5
def get_branches_with_schema_number_pool(kind: str, attribute_name: str) -> list[str]:
    """Return branches where schema defined NumberPool exists"""

    matching_branches: list[str] = []

    for branch_name in registry.schema.get_branches():
        # The kind may not exist on every branch
        try:
            node_schema = registry.schema.get(name=kind, branch=branch_name)
        except SchemaNotFoundError:
            continue

        if attribute_name not in node_schema.attribute_names:
            continue

        if node_schema.get_attribute(name=attribute_name).kind == "NumberPool":
            matching_branches.append(branch_name)

    return matching_branches
@@ -0,0 +1,56 @@
1
+ from __future__ import annotations
2
+
3
+ from prefect import flow
4
+ from prefect.logging import get_run_logger
5
+
6
+ from infrahub.context import InfrahubContext # noqa: TC001 needed for prefect flow
7
+ from infrahub.core.constants import NumberPoolType
8
+ from infrahub.core.manager import NodeManager
9
+ from infrahub.core.protocols import CoreNumberPool
10
+ from infrahub.core.registry import registry
11
+ from infrahub.core.schema.attribute_parameters import NumberPoolParameters
12
+ from infrahub.pools.registration import get_branches_with_schema_number_pool
13
+ from infrahub.services import InfrahubServices # noqa: TC001 needed for prefect flow
14
+
15
+
16
+ @flow(
17
+ name="validate-schema-number-pools",
18
+ flow_run_name="Validate schema number pools on {branch_name}",
19
+ )
20
+ async def validate_schema_number_pools(
21
+ branch_name: str, # noqa: ARG001
22
+ context: InfrahubContext, # noqa: ARG001
23
+ service: InfrahubServices,
24
+ ) -> None:
25
+ log = get_run_logger()
26
+
27
+ async with service.database.start_session() as dbs:
28
+ schema_number_pools = await NodeManager.query(
29
+ db=dbs, schema=CoreNumberPool, filters={"pool_type__value": NumberPoolType.SCHEMA.value}
30
+ )
31
+
32
+ for schema_number_pool in list(schema_number_pools):
33
+ defined_on_branches = get_branches_with_schema_number_pool(
34
+ kind=schema_number_pool.node.value, attribute_name=schema_number_pool.node_attribute.value
35
+ )
36
+ if registry.default_branch in defined_on_branches:
37
+ schema = registry.schema.get(name=schema_number_pool.node.value, branch=registry.default_branch)
38
+ attribute = schema.get_attribute(name=schema_number_pool.node_attribute.value)
39
+ number_pool_updated = False
40
+ if isinstance(attribute.parameters, NumberPoolParameters):
41
+ if schema_number_pool.start_range.value != attribute.parameters.start_range:
42
+ schema_number_pool.start_range.value = attribute.parameters.start_range
43
+ number_pool_updated = True
44
+ if schema_number_pool.end_range.value != attribute.parameters.end_range:
45
+ schema_number_pool.end_range.value = attribute.parameters.end_range
46
+ number_pool_updated = True
47
+
48
+ if number_pool_updated:
49
+ log.info(
50
+ f"Updating NumberPool={schema_number_pool.id} based on changes in the schema on {registry.default_branch}"
51
+ )
52
+ await schema_number_pool.save(db=service.database)
53
+
54
+ elif not defined_on_branches:
55
+ log.info(f"Deleting number pool (id={schema_number_pool.id}) as it is no longer defined in the schema")
56
+ await schema_number_pool.delete(db=service.database)
@@ -254,12 +254,12 @@ async def run_proposed_change_data_integrity_check(
254
254
  """Triggers a data integrity validation check on the provided proposed change to start."""
255
255
  await add_tags(branches=[model.source_branch], nodes=[model.proposed_change])
256
256
 
257
- async with service.database.start_transaction() as dbt:
258
- destination_branch = await registry.get_branch(db=dbt, branch=model.destination_branch)
259
- source_branch = await registry.get_branch(db=dbt, branch=model.source_branch)
257
+ async with service.database.start_session() as dbs:
258
+ destination_branch = await registry.get_branch(db=dbs, branch=model.destination_branch)
259
+ source_branch = await registry.get_branch(db=dbs, branch=model.source_branch)
260
260
  component_registry = get_component_registry()
261
261
 
262
- diff_coordinator = await component_registry.get_component(DiffCoordinator, db=dbt, branch=source_branch)
262
+ diff_coordinator = await component_registry.get_component(DiffCoordinator, db=dbs, branch=source_branch)
263
263
  await diff_coordinator.update_branch_diff(base_branch=destination_branch, diff_branch=source_branch)
264
264
 
265
265
 
@@ -1006,11 +1006,11 @@ async def run_proposed_change_pipeline(
1006
1006
 
1007
1007
  await _gather_repository_repository_diffs(repositories=repositories, service=service)
1008
1008
 
1009
- async with service.database.start_transaction() as dbt:
1010
- destination_branch = await registry.get_branch(db=dbt, branch=model.destination_branch)
1011
- source_branch = await registry.get_branch(db=dbt, branch=model.source_branch)
1009
+ async with service.database.start_session() as dbs:
1010
+ destination_branch = await registry.get_branch(db=dbs, branch=model.destination_branch)
1011
+ source_branch = await registry.get_branch(db=dbs, branch=model.source_branch)
1012
1012
  component_registry = get_component_registry()
1013
- diff_coordinator = await component_registry.get_component(DiffCoordinator, db=dbt, branch=source_branch)
1013
+ diff_coordinator = await component_registry.get_component(DiffCoordinator, db=dbs, branch=source_branch)
1014
1014
  await diff_coordinator.update_branch_diff(base_branch=destination_branch, diff_branch=source_branch)
1015
1015
 
1016
1016
  diff_summary = await service.client.get_diff_summary(branch=model.source_branch)
File without changes
@@ -0,0 +1,27 @@
1
+ from __future__ import annotations
2
+
3
+ from prefect import flow
4
+ from prefect.logging import get_run_logger
5
+
6
+ from infrahub.context import InfrahubContext # noqa: TC001 needed for prefect flow
7
+ from infrahub.pools.tasks import validate_schema_number_pools
8
+ from infrahub.services import InfrahubServices # noqa: TC001 needed for prefect flow
9
+ from infrahub.workflows.utils import wait_for_schema_to_converge
10
+
11
+
12
@flow(
    name="schema-updated",
    flow_run_name="Running actions after the schema was updated on '{branch_name}'",
)
async def schema_updated(
    branch_name: str,
    schema_hash: str,  # noqa: ARG001
    context: InfrahubContext,
    service: InfrahubServices,
) -> None:
    """Run follow-up actions after a schema update on a branch.

    Waits for the schema to converge across workers, then validates the
    schema-defined number pools for the branch.
    """
    logger = get_run_logger()

    # Ensure every worker has picked up the new schema before acting on it
    await wait_for_schema_to_converge(
        branch_name=branch_name, component=service.component, db=service.database, log=logger
    )

    await validate_schema_number_pools(branch_name=branch_name, context=context, service=service)
@@ -0,0 +1,23 @@
1
+ from infrahub.events.schema_action import SchemaUpdatedEvent
2
+ from infrahub.trigger.models import BuiltinTriggerDefinition, EventTrigger, ExecuteWorkflow
3
+ from infrahub.workflows.catalogue import SCHEMA_UPDATED
4
+
5
# Builtin trigger: when a SchemaUpdatedEvent is emitted, execute the SCHEMA_UPDATED
# workflow with the branch name and schema hash taken from the event payload.
TRIGGER_SCHEMA_UPDATED = BuiltinTriggerDefinition(
    name="schema-updated-trigger",
    trigger=EventTrigger(
        events={SchemaUpdatedEvent.event_name},
    ),
    actions=[
        ExecuteWorkflow(
            workflow=SCHEMA_UPDATED,
            parameters={
                "branch_name": "{{ event.payload['data']['branch_name'] }}",
                "schema_hash": "{{ event.payload['data']['schema_hash'] }}",
                # the event context is rendered to JSON via jinja so it can be
                # passed through as a structured workflow parameter
                "context": {
                    "__prefect_kind": "json",
                    "value": {"__prefect_kind": "jinja", "template": "{{ event.payload['context'] | tojson }}"},
                },
            },
        ),
    ],
)
@@ -1,15 +1,19 @@
1
1
  from infrahub.actions.triggers import TRIGGER_ACTION_RULE_UPDATE
2
+ from infrahub.branch.triggers import TRIGGER_BRANCH_MERGED
2
3
  from infrahub.computed_attribute.triggers import (
3
4
  TRIGGER_COMPUTED_ATTRIBUTE_ALL_SCHEMA,
4
5
  TRIGGER_COMPUTED_ATTRIBUTE_PYTHON_SETUP_COMMIT,
5
6
  )
7
+ from infrahub.schema.triggers import TRIGGER_SCHEMA_UPDATED
6
8
  from infrahub.trigger.models import TriggerDefinition
7
9
  from infrahub.webhook.triggers import TRIGGER_WEBHOOK_DELETE, TRIGGER_WEBHOOK_SETUP_UPDATE
8
10
 
9
11
  builtin_triggers: list[TriggerDefinition] = [
10
12
  TRIGGER_ACTION_RULE_UPDATE,
13
+ TRIGGER_BRANCH_MERGED,
11
14
  TRIGGER_COMPUTED_ATTRIBUTE_ALL_SCHEMA,
12
15
  TRIGGER_COMPUTED_ATTRIBUTE_PYTHON_SETUP_COMMIT,
16
+ TRIGGER_SCHEMA_UPDATED,
13
17
  TRIGGER_WEBHOOK_DELETE,
14
18
  TRIGGER_WEBHOOK_SETUP_UPDATE,
15
19
  ]