infrahub-server 1.4.0b0-py3-none-any.whl → 1.4.0rc0-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
Files changed (52)
  1. infrahub/api/schema.py +3 -7
  2. infrahub/cli/db.py +25 -0
  3. infrahub/cli/db_commands/__init__.py +0 -0
  4. infrahub/cli/db_commands/check_inheritance.py +284 -0
  5. infrahub/cli/upgrade.py +3 -0
  6. infrahub/config.py +4 -4
  7. infrahub/core/attribute.py +6 -0
  8. infrahub/core/constants/__init__.py +1 -0
  9. infrahub/core/graph/__init__.py +1 -1
  10. infrahub/core/initialization.py +26 -21
  11. infrahub/core/manager.py +2 -2
  12. infrahub/core/migrations/__init__.py +2 -0
  13. infrahub/core/migrations/graph/__init__.py +5 -1
  14. infrahub/core/migrations/graph/m033_deduplicate_relationship_vertices.py +1 -1
  15. infrahub/core/migrations/graph/m035_orphan_relationships.py +43 -0
  16. infrahub/core/migrations/graph/{m035_drop_attr_value_index.py → m036_drop_attr_value_index.py} +3 -3
  17. infrahub/core/migrations/graph/m037_index_attr_vals.py +577 -0
  18. infrahub/core/migrations/query/node_duplicate.py +26 -3
  19. infrahub/core/migrations/schema/attribute_kind_update.py +156 -0
  20. infrahub/core/models.py +5 -1
  21. infrahub/core/node/resource_manager/ip_address_pool.py +50 -48
  22. infrahub/core/node/resource_manager/ip_prefix_pool.py +55 -53
  23. infrahub/core/node/resource_manager/number_pool.py +20 -18
  24. infrahub/core/query/branch.py +37 -20
  25. infrahub/core/query/node.py +15 -0
  26. infrahub/core/relationship/model.py +13 -13
  27. infrahub/core/schema/definitions/internal.py +1 -1
  28. infrahub/core/schema/generated/attribute_schema.py +1 -1
  29. infrahub/core/validators/attribute/kind.py +5 -1
  30. infrahub/core/validators/determiner.py +22 -2
  31. infrahub/events/__init__.py +2 -0
  32. infrahub/events/proposed_change_action.py +22 -0
  33. infrahub/graphql/context.py +1 -1
  34. infrahub/graphql/mutations/proposed_change.py +5 -0
  35. infrahub/graphql/mutations/relationship.py +1 -1
  36. infrahub/graphql/mutations/schema.py +14 -1
  37. infrahub/graphql/schema.py +3 -14
  38. infrahub/graphql/types/event.py +8 -0
  39. infrahub/permissions/__init__.py +3 -0
  40. infrahub/permissions/constants.py +13 -0
  41. infrahub/permissions/globals.py +32 -0
  42. infrahub/task_manager/event.py +5 -1
  43. infrahub_sdk/client.py +6 -6
  44. infrahub_sdk/ctl/repository.py +0 -51
  45. infrahub_sdk/ctl/schema.py +9 -9
  46. infrahub_sdk/protocols.py +6 -40
  47. infrahub_sdk/utils.py +9 -5
  48. {infrahub_server-1.4.0b0.dist-info → infrahub_server-1.4.0rc0.dist-info}/METADATA +5 -4
  49. {infrahub_server-1.4.0b0.dist-info → infrahub_server-1.4.0rc0.dist-info}/RECORD +52 -47
  50. {infrahub_server-1.4.0b0.dist-info → infrahub_server-1.4.0rc0.dist-info}/LICENSE.txt +0 -0
  51. {infrahub_server-1.4.0b0.dist-info → infrahub_server-1.4.0rc0.dist-info}/WHEEL +0 -0
  52. {infrahub_server-1.4.0b0.dist-info → infrahub_server-1.4.0rc0.dist-info}/entry_points.txt +0 -0
infrahub/core/migrations/schema/attribute_kind_update.py ADDED
@@ -0,0 +1,156 @@
+ from __future__ import annotations
+
+ from typing import TYPE_CHECKING, Any, Sequence
+
+ from infrahub.types import is_large_attribute_type
+
+ from ..query import AttributeMigrationQuery
+ from ..shared import AttributeSchemaMigration, MigrationResult
+
+ if TYPE_CHECKING:
+     from infrahub.core.branch.models import Branch
+     from infrahub.core.timestamp import Timestamp
+     from infrahub.database import InfrahubDatabase
+
+
+ class AttributeKindUpdateMigrationQuery(AttributeMigrationQuery):
+     name = "migration_attribute_kind"
+     insert_return = False
+
+     async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:  # noqa: ARG002
+         branch_filter, branch_params = self.branch.get_query_filter_path(at=self.at)
+         self.params.update(branch_params)
+         needs_index = not is_large_attribute_type(self.migration.new_attribute_schema.kind)
+         self.params["needs_index"] = needs_index
+         self.params["branch"] = self.branch.name
+         self.params["branch_level"] = self.branch.hierarchy_level
+         self.params["at"] = self.at.to_string()
+         self.params["attr_name"] = self.migration.previous_attribute_schema.name
+         new_attr_value_labels = "AttributeValue"
+         if needs_index:
+             new_attr_value_labels += ":AttributeValueIndexed"
+         # ruff: noqa: S608
+         query = """
+         // ------------
+         // start with all the Attribute vertices we might care about
+         // ------------
+         MATCH (n:%(schema_kind)s)-[:HAS_ATTRIBUTE]->(attr:Attribute)
+         WHERE attr.name = $attr_name
+         WITH DISTINCT n, attr
+
+         // ------------
+         // for each Attribute, find the most recent active edge and AttributeValue vertex that needs to be [un]indexed
+         // ------------
+         CALL (n, attr) {
+             MATCH (n)-[r1:HAS_ATTRIBUTE]->(attr:Attribute)-[r2:HAS_VALUE]->(av)
+             WHERE all(r IN [r1, r2] WHERE %(branch_filter)s)
+             WITH r2, av, r1.status = "active" AND r2.status = "active" AS is_active
+             ORDER BY r2.branch_level DESC, r2.from DESC, r2.status = "active" DESC, r1.branch_level DESC, r1.from DESC, r1.status = "active" DESC
+             LIMIT 1
+             WITH r2 AS has_value_e, av, "AttributeValueIndexed" IN labels(av) AS is_indexed
+             WHERE is_active AND is_indexed <> $needs_index
+             RETURN has_value_e, av
+         }
+
+         // ------------
+         // check if the correct AttributeValue vertex to use exists
+         // create it if not
+         // ------------
+         WITH DISTINCT av.is_default AS av_is_default, av.value AS av_value
+         CALL (av_is_default, av_value) {
+             OPTIONAL MATCH (existing_av:AttributeValue {is_default: av_is_default, value: av_value})
+             WHERE "AttributeValueIndexed" IN labels(existing_av) = $needs_index
+             WITH existing_av WHERE existing_av IS NULL
+             LIMIT 1
+             CREATE (:%(new_attr_value_labels)s {is_default: av_is_default, value: av_value})
+         }
+
+         // ------------
+         // get all the AttributeValue vertices that need to be updated again and run the updates
+         // ------------
+         WITH 1 AS one
+         LIMIT 1
+         MATCH (n:%(schema_kind)s)-[:HAS_ATTRIBUTE]->(attr:Attribute)
+         WHERE attr.name = $attr_name
+         WITH DISTINCT n, attr
+
+         // ------------
+         // for each Attribute, find the most recent active edge and AttributeValue vertex that needs to be [un]indexed
+         // ------------
+         CALL (n, attr) {
+             MATCH (n)-[r1:HAS_ATTRIBUTE]->(attr:Attribute)-[r2:HAS_VALUE]->(av)
+             WHERE all(r IN [r1, r2] WHERE %(branch_filter)s)
+             WITH r2, av, r1.status = "active" AND r2.status = "active" AS is_active
+             ORDER BY r2.branch_level DESC, r2.from DESC, r2.status = "active" DESC, r1.branch_level DESC, r1.from DESC, r1.status = "active" DESC
+             LIMIT 1
+             WITH r2 AS has_value_e, av, "AttributeValueIndexed" IN labels(av) AS is_indexed
+             WHERE is_active AND is_indexed <> $needs_index
+             RETURN has_value_e, av
+         }
+
+
+         // ------------
+         // create and update the HAS_VALUE edges
+         // ------------
+         CALL (attr, has_value_e, av) {
+             // ------------
+             // get the correct AttributeValue vertex b/c it definitely exists now
+             // ------------
+             MATCH (new_av:%(new_attr_value_labels)s {is_default: av.is_default, value: av.value})
+             WHERE "AttributeValueIndexed" IN labels(new_av) = $needs_index
+             LIMIT 1
+
+             // ------------
+             // create the new HAS_VALUE edge
+             // ------------
+             CREATE (attr)-[new_has_value_e:HAS_VALUE]->(new_av)
+             SET new_has_value_e = properties(has_value_e)
+             SET new_has_value_e.status = "active"
+             SET new_has_value_e.branch = $branch
+             SET new_has_value_e.branch_level = $branch_level
+             SET new_has_value_e.from = $at
+             SET new_has_value_e.to = NULL
+
+             // ------------
+             // if we are updating on a branch and the existing edge is on the default branch,
+             // then create a new deleted edge on this branch
+             // ------------
+             WITH attr, has_value_e, av
+             WHERE has_value_e.branch <> $branch
+             CREATE (attr)-[deleted_has_value_e:HAS_VALUE]->(av)
+             SET deleted_has_value_e = properties(has_value_e)
+             SET deleted_has_value_e.status = "deleted"
+             SET deleted_has_value_e.branch = $branch
+             SET deleted_has_value_e.branch_level = $branch_level
+             SET deleted_has_value_e.from = $at
+             SET deleted_has_value_e.to = NULL
+         }
+
+         // ------------
+         // if the existing edge is on the same branch as the update,
+         // then set its "to" time
+         // ------------
+         CALL (has_value_e) {
+             WITH has_value_e
+             WHERE has_value_e.branch = $branch
+             SET has_value_e.to = $at
+         }
+         """ % {
+             "schema_kind": self.migration.previous_schema.kind,
+             "branch_filter": branch_filter,
+             "new_attr_value_labels": new_attr_value_labels,
+         }
+         self.add_to_query(query)
+
+
+ class AttributeKindUpdateMigration(AttributeSchemaMigration):
+     name: str = "attribute.kind.update"
+     queries: Sequence[type[AttributeMigrationQuery]] = [AttributeKindUpdateMigrationQuery]  # type: ignore[assignment]
+
+     async def execute(self, db: InfrahubDatabase, branch: Branch, at: Timestamp | str | None = None) -> MigrationResult:
+         is_indexed_previous = is_large_attribute_type(self.previous_attribute_schema.kind)
+         is_indexed_new = is_large_attribute_type(self.new_attribute_schema.kind)
+         if is_indexed_previous is is_indexed_new:
+             return MigrationResult()
+
+         return await super().execute(db=db, branch=branch, at=at)
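
Note: Cypher cannot bind node labels as query parameters, which is why the query above splices `%(schema_kind)s` and `%(new_attr_value_labels)s` into the text with `%` formatting (hence the `# ruff: noqa: S608` suppression) while data values remain `$` parameters. A minimal sketch of that split, with illustrative names:

    # Labels must be interpolated into the query text; values are bound separately.
    labels = "AttributeValue:AttributeValueIndexed"
    query = """
    MATCH (av:%(labels)s)
    WHERE av.value = $value
    RETURN av
    """ % {"labels": labels}
    params = {"value": "example"}  # handed to the driver alongside the query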
infrahub/core/models.py CHANGED
@@ -569,7 +569,11 @@ class HashableModel(BaseModel):
 
          for field_name in other.model_fields.keys():
              if not hasattr(self, field_name):
-                 setattr(self, field_name, getattr(other, field_name))
+                 try:
+                     setattr(self, field_name, getattr(other, field_name))
+                 except ValueError:
+                     # handles the case where self and other are different types and other has fields that self does not
+                     pass
                  continue
 
          attr_other = getattr(other, field_name)
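
Note: HashableModel is a pydantic model, and pydantic raises ValidationError, a ValueError subclass, when an assignment fails validation; that is the error this new `try`/`except` swallows. A minimal sketch of the failure mode, assuming pydantic v2 with `validate_assignment` enabled (the `Narrow` model is hypothetical):

    from pydantic import BaseModel, ConfigDict, ValidationError

    class Narrow(BaseModel):
        model_config = ConfigDict(validate_assignment=True)
        count: int = 0

    narrow = Narrow()
    try:
        narrow.count = "not-a-number"  # incompatible field copied from another model type
    except ValidationError:  # ValidationError subclasses ValueError
        pass  # mirrors the behaviour added above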
infrahub/core/node/resource_manager/ip_address_pool.py CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
  import ipaddress
  from typing import TYPE_CHECKING, Any
 
+ from infrahub import lock
  from infrahub.core import registry
  from infrahub.core.ipam.reconciler import IpamReconciler
  from infrahub.core.query.ipam import get_ip_addresses
@@ -33,54 +34,55 @@ class CoreIPAddressPool(Node):
          prefixlen: int | None = None,
          at: Timestamp | None = None,
      ) -> Node:
-         # Check if there is already a resource allocated with this identifier
-         # if not, pull all existing prefixes and allocated the next available
-
-         if identifier:
-             query_get = await IPAddressPoolGetReserved.init(db=db, pool_id=self.id, identifier=identifier)
-             await query_get.execute(db=db)
-             result = query_get.get_result()
-
-             if result:
-                 address = result.get_node("address")
-                 # TODO add support for branch, if the node is reserved with this id in another branch we should return an error
-                 node = await registry.manager.get_one(db=db, id=address.get("uuid"), branch=branch)
-
-                 if node:
-                     return node
-
-         data = data or {}
-
-         address_type = address_type or data.get("address_type") or self.default_address_type.value  # type: ignore[attr-defined]
-         if not address_type:
-             raise ValueError(
-                 f"IPAddressPool: {self.name.value} | "  # type: ignore[attr-defined]
-                 "An address_type or a default_value type must be provided to allocate a new IP address"
-             )
-
-         ip_namespace = await self.ip_namespace.get_peer(db=db)  # type: ignore[attr-defined]
-
-         prefixlen = prefixlen or data.get("prefixlen") or self.default_prefix_length.value  # type: ignore[attr-defined]
-
-         next_address = await self.get_next(db=db, prefixlen=prefixlen)
-
-         target_schema = registry.get_node_schema(name=address_type, branch=branch)
-         node = await Node.init(db=db, schema=target_schema, branch=branch, at=at)
-         try:
-             await node.new(db=db, address=str(next_address), ip_namespace=ip_namespace, **data)
-         except ValidationError as exc:
-             raise ValueError(f"IPAddressPool: {self.name.value} | {exc!s}") from exc  # type: ignore[attr-defined]
-         await node.save(db=db, at=at)
-         reconciler = IpamReconciler(db=db, branch=branch)
-         await reconciler.reconcile(ip_value=next_address, namespace=ip_namespace.id, node_uuid=node.get_id())
-
-         if identifier:
-             query_set = await IPAddressPoolSetReserved.init(
-                 db=db, pool_id=self.id, identifier=identifier, address_id=node.id, at=at
-             )
-             await query_set.execute(db=db)
-
-         return node
+         async with lock.registry.get(name=self.get_id(), namespace="resource_pool"):
+             # Check if there is already a resource allocated with this identifier
+             # if not, pull all existing prefixes and allocated the next available
+
+             if identifier:
+                 query_get = await IPAddressPoolGetReserved.init(db=db, pool_id=self.id, identifier=identifier)
+                 await query_get.execute(db=db)
+                 result = query_get.get_result()
+
+                 if result:
+                     address = result.get_node("address")
+                     # TODO add support for branch, if the node is reserved with this id in another branch we should return an error
+                     node = await registry.manager.get_one(db=db, id=address.get("uuid"), branch=branch)
+
+                     if node:
+                         return node
+
+             data = data or {}
+
+             address_type = address_type or data.get("address_type") or self.default_address_type.value  # type: ignore[attr-defined]
+             if not address_type:
+                 raise ValueError(
+                     f"IPAddressPool: {self.name.value} | "  # type: ignore[attr-defined]
+                     "An address_type or a default_value type must be provided to allocate a new IP address"
+                 )
+
+             ip_namespace = await self.ip_namespace.get_peer(db=db)  # type: ignore[attr-defined]
+
+             prefixlen = prefixlen or data.get("prefixlen") or self.default_prefix_length.value  # type: ignore[attr-defined]
+
+             next_address = await self.get_next(db=db, prefixlen=prefixlen)
+
+             target_schema = registry.get_node_schema(name=address_type, branch=branch)
+             node = await Node.init(db=db, schema=target_schema, branch=branch, at=at)
+             try:
+                 await node.new(db=db, address=str(next_address), ip_namespace=ip_namespace, **data)
+             except ValidationError as exc:
+                 raise ValueError(f"IPAddressPool: {self.name.value} | {exc!s}") from exc  # type: ignore[attr-defined]
+             await node.save(db=db, at=at)
+             reconciler = IpamReconciler(db=db, branch=branch)
+             await reconciler.reconcile(ip_value=next_address, namespace=ip_namespace.id, node_uuid=node.get_id())
+
+             if identifier:
+                 query_set = await IPAddressPoolSetReserved.init(
+                     db=db, pool_id=self.id, identifier=identifier, address_id=node.id, at=at
+                 )
+                 await query_set.execute(db=db)
+
+             return node
 
      async def get_next(self, db: InfrahubDatabase, prefixlen: int | None = None) -> IPAddressType:
          resources = await self.resources.get_peers(db=db)  # type: ignore[attr-defined]
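
Note: the internals of `lock.registry.get()` are not part of this diff; the pattern is a named, per-pool mutex so two concurrent allocations cannot both compute the same "next available" address between the reservation check and the save. A minimal local sketch of the idea, assuming one asyncio.Lock per (namespace, name) pair (`LocalLockRegistry` is hypothetical; the real registry may be backed by a distributed lock):

    import asyncio
    from collections import defaultdict
    from contextlib import asynccontextmanager

    class LocalLockRegistry:
        def __init__(self) -> None:
            self._locks: dict[tuple[str, str], asyncio.Lock] = defaultdict(asyncio.Lock)

        @asynccontextmanager
        async def get(self, name: str, namespace: str):
            # Serialize every allocation for a given pool id.
            async with self._locks[(namespace, name)]:
                yield

    registry = LocalLockRegistry()

    async def allocate(pool_id: str) -> None:
        async with registry.get(name=pool_id, namespace="resource_pool"):
            ...  # read state, pick the next value, persist the reservation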
infrahub/core/node/resource_manager/ip_prefix_pool.py CHANGED
@@ -5,6 +5,7 @@ from typing import TYPE_CHECKING, Any
 
  from netaddr import IPSet
 
+ from infrahub import lock
  from infrahub.core import registry
  from infrahub.core.ipam.reconciler import IpamReconciler
  from infrahub.core.query.ipam import get_subnets
@@ -36,59 +37,60 @@ class CoreIPPrefixPool(Node):
          prefix_type: str | None = None,
          at: Timestamp | None = None,
      ) -> Node:
-         # Check if there is already a resource allocated with this identifier
-         # if not, pull all existing prefixes and allocated the next available
-         if identifier:
-             query_get = await PrefixPoolGetReserved.init(db=db, pool_id=self.id, identifier=identifier)
-             await query_get.execute(db=db)
-             result = query_get.get_result()
-             if result:
-                 prefix = result.get_node("prefix")
-                 # TODO add support for branch, if the node is reserved with this id in another branch we should return an error
-                 node = await registry.manager.get_one(db=db, id=prefix.get("uuid"), branch=branch)
-                 if node:
-                     return node
-
-         ip_namespace = await self.ip_namespace.get_peer(db=db)  # type: ignore[attr-defined]
-
-         data = data or {}
-
-         prefixlen = prefixlen or data.get("prefixlen", None) or self.default_prefix_length.value  # type: ignore[attr-defined]
-         if not prefixlen:
-             raise ValueError(
-                 f"IPPrefixPool: {self.name.value} | "  # type: ignore[attr-defined]
-                 "A prefixlen or a default_value must be provided to allocate a new prefix"
-             )
-
-         next_prefix = await self.get_next(db=db, prefixlen=prefixlen)
-
-         prefix_type = prefix_type or data.get("prefix_type", None) or self.default_prefix_type.value  # type: ignore[attr-defined]
-         if not prefix_type:
-             raise ValueError(
-                 f"IPPrefixPool: {self.name.value} | "  # type: ignore[attr-defined]
-                 "A prefix_type or a default_value type must be provided to allocate a new prefix"
-             )
-
-         member_type = member_type or data.get("member_type", None) or self.default_member_type.value.value  # type: ignore[attr-defined]
-         data["member_type"] = member_type
-
-         target_schema = registry.get_node_schema(name=prefix_type, branch=branch)
-         node = await Node.init(db=db, schema=target_schema, branch=branch, at=at)
-         try:
-             await node.new(db=db, prefix=str(next_prefix), ip_namespace=ip_namespace, **data)
-         except ValidationError as exc:
-             raise ValueError(f"IPPrefixPool: {self.name.value} | {exc!s}") from exc  # type: ignore[attr-defined]
-         await node.save(db=db, at=at)
-         reconciler = IpamReconciler(db=db, branch=branch)
-         await reconciler.reconcile(ip_value=next_prefix, namespace=ip_namespace.id, node_uuid=node.get_id())
-
-         if identifier:
-             query_set = await PrefixPoolSetReserved.init(
-                 db=db, pool_id=self.id, identifier=identifier, prefix_id=node.id, at=at
-             )
-             await query_set.execute(db=db)
-
-         return node
+         async with lock.registry.get(name=self.get_id(), namespace="resource_pool"):
+             # Check if there is already a resource allocated with this identifier
+             # if not, pull all existing prefixes and allocated the next available
+             if identifier:
+                 query_get = await PrefixPoolGetReserved.init(db=db, pool_id=self.id, identifier=identifier)
+                 await query_get.execute(db=db)
+                 result = query_get.get_result()
+                 if result:
+                     prefix = result.get_node("prefix")
+                     # TODO add support for branch, if the node is reserved with this id in another branch we should return an error
+                     node = await registry.manager.get_one(db=db, id=prefix.get("uuid"), branch=branch)
+                     if node:
+                         return node
+
+             ip_namespace = await self.ip_namespace.get_peer(db=db)  # type: ignore[attr-defined]
+
+             data = data or {}
+
+             prefixlen = prefixlen or data.get("prefixlen", None) or self.default_prefix_length.value  # type: ignore[attr-defined]
+             if not prefixlen:
+                 raise ValueError(
+                     f"IPPrefixPool: {self.name.value} | "  # type: ignore[attr-defined]
+                     "A prefixlen or a default_value must be provided to allocate a new prefix"
+                 )
+
+             next_prefix = await self.get_next(db=db, prefixlen=prefixlen)
+
+             prefix_type = prefix_type or data.get("prefix_type", None) or self.default_prefix_type.value  # type: ignore[attr-defined]
+             if not prefix_type:
+                 raise ValueError(
+                     f"IPPrefixPool: {self.name.value} | "  # type: ignore[attr-defined]
+                     "A prefix_type or a default_value type must be provided to allocate a new prefix"
+                 )
+
+             member_type = member_type or data.get("member_type", None) or self.default_member_type.value.value  # type: ignore[attr-defined]
+             data["member_type"] = member_type
+
+             target_schema = registry.get_node_schema(name=prefix_type, branch=branch)
+             node = await Node.init(db=db, schema=target_schema, branch=branch, at=at)
+             try:
+                 await node.new(db=db, prefix=str(next_prefix), ip_namespace=ip_namespace, **data)
+             except ValidationError as exc:
+                 raise ValueError(f"IPPrefixPool: {self.name.value} | {exc!s}") from exc  # type: ignore[attr-defined]
+             await node.save(db=db, at=at)
+             reconciler = IpamReconciler(db=db, branch=branch)
+             await reconciler.reconcile(ip_value=next_prefix, namespace=ip_namespace.id, node_uuid=node.get_id())
+
+             if identifier:
+                 query_set = await PrefixPoolSetReserved.init(
+                     db=db, pool_id=self.id, identifier=identifier, prefix_id=node.id, at=at
+                 )
+                 await query_set.execute(db=db)
+
+             return node
 
      async def get_next(self, db: InfrahubDatabase, prefixlen: int) -> IPNetworkType:
          resources = await self.resources.get_peers(db=db)  # type: ignore[attr-defined]
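
Note: this module already imports netaddr's IPSet, which lets `get_next` subtract allocated subnets from the pool's resources. A minimal sketch of a next-free-prefix search in that style (`next_free_subnet` and its arguments are hypothetical, not the module's actual helper):

    from netaddr import IPNetwork, IPSet

    def next_free_subnet(resource: str, used: list[str], prefixlen: int) -> IPNetwork | None:
        # Free space = the pool's resource minus everything already allocated.
        available = IPSet([resource]) - IPSet(used)
        for cidr in available.iter_cidrs():
            if cidr.prefixlen <= prefixlen:
                # subnet() yields aligned /prefixlen networks inside this free block.
                return next(cidr.subnet(prefixlen))
        return None

    # 10.0.0.0/24 and 10.0.1.0/24 are taken, so the next free /24 is 10.0.2.0/24.
    print(next_free_subnet("10.0.0.0/16", ["10.0.0.0/24", "10.0.1.0/24"], 24))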
infrahub/core/node/resource_manager/number_pool.py CHANGED
@@ -2,6 +2,7 @@ from __future__ import annotations
 
  from typing import TYPE_CHECKING
 
+ from infrahub import lock
  from infrahub.core import registry
  from infrahub.core.query.resource_manager import NumberPoolGetReserved, NumberPoolGetUsed, NumberPoolSetReserved
  from infrahub.core.schema.attribute_parameters import NumberAttributeParameters
@@ -62,24 +63,25 @@ class CoreNumberPool(Node):
          identifier: str | None = None,
          at: Timestamp | None = None,
      ) -> int:
-         # NOTE: ideally we should use the HFID as the identifier (if available)
-         # one of the challenge with using the HFID is that it might change over time
-         # so we need to ensure that the identifier is stable, or we need to handle the case where the identifier changes
-         identifier = identifier or node.get_id()
-
-         # Check if there is already a resource allocated with this identifier
-         # if not, pull all existing number and allocate the next available
-         # TODO add support for branch, if the node is reserved with this id in another branch we should return an error
-         query_get = await NumberPoolGetReserved.init(db=db, branch=branch, pool_id=self.id, identifier=identifier)
-         await query_get.execute(db=db)
-         reservation = query_get.get_reservation()
-         if reservation is not None:
-             return reservation
-
-         # If we have not returned a value we need to find one if avaiable
-         number = await self.get_next(db=db, branch=branch, attribute=attribute)
-         await self.reserve(db=db, number=number, identifier=identifier, at=at)
-         return number
+         async with lock.registry.get(name=self.get_id(), namespace="resource_pool"):
+             # NOTE: ideally we should use the HFID as the identifier (if available)
+             # one of the challenge with using the HFID is that it might change over time
+             # so we need to ensure that the identifier is stable, or we need to handle the case where the identifier changes
+             identifier = identifier or node.get_id()
+
+             # Check if there is already a resource allocated with this identifier
+             # if not, pull all existing number and allocate the next available
+             # TODO add support for branch, if the node is reserved with this id in another branch we should return an error
+             query_get = await NumberPoolGetReserved.init(db=db, branch=branch, pool_id=self.id, identifier=identifier)
+             await query_get.execute(db=db)
+             reservation = query_get.get_reservation()
+             if reservation is not None:
+                 return reservation
+
+             # If we have not returned a value we need to find one if avaiable
+             number = await self.get_next(db=db, branch=branch, attribute=attribute)
+             await self.reserve(db=db, number=number, identifier=identifier, at=at)
+             return number
 
      async def get_next(self, db: InfrahubDatabase, branch: Branch, attribute: AttributeSchema) -> int:
          taken = await self.get_used(db=db, branch=branch)
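
Note: with the lock making the check-then-reserve sequence atomic per pool, `get_next` reduces to finding the smallest number in the pool's range that is not already used or reserved. A minimal sketch of that selection (the helper and its range arguments are hypothetical):

    def next_free_number(start: int, end: int, taken: set[int]) -> int:
        for candidate in range(start, end + 1):
            if candidate not in taken:
                return candidate
        raise ValueError("pool exhausted")

    assert next_free_number(10, 20, {10, 11, 13}) == 12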
infrahub/core/query/branch.py CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
  from typing import TYPE_CHECKING, Any
 
  from infrahub import config
+ from infrahub.core.constants import GLOBAL_BRANCH_NAME
  from infrahub.core.query import Query, QueryType
 
  if TYPE_CHECKING:
@@ -21,33 +22,49 @@ class DeleteBranchRelationshipsQuery(Query):
 
      async def query_init(self, db: InfrahubDatabase, **kwargs: Any) -> None:  # noqa: ARG002
          query = """
-         // delete all relationships on this branch
-         MATCH (s)-[r1]-(d)
-         WHERE r1.branch = $branch_name
-         CALL (r1) {
-             DELETE r1
+         // --------------
+         // for every Node created on this branch (it's about to be deleted), find any agnostic relationships
+         // connected to the Node and delete them
+         // --------------
+         OPTIONAL MATCH (:Root)<-[e:IS_PART_OF {status: "active"}]-(n:Node)
+         WHERE e.branch = $branch_name
+         CALL (n) {
+             OPTIONAL MATCH (n)-[:IS_RELATED {branch: $global_branch_name}]-(rel:Relationship)
+             DETACH DELETE rel
          } IN TRANSACTIONS
 
-         // check for any orphaned Node vertices and delete them
-         WITH collect(DISTINCT s.uuid) + collect(DISTINCT d.uuid) AS nodes_uuids
-         MATCH (s2:Node)-[r2]-(d2)
-         WHERE NOT exists((s2)-[:IS_PART_OF]-(:Root))
-         AND s2.uuid IN nodes_uuids
-         CALL (r2) {
-             DELETE r2
+         // reduce the results to a single row
+         WITH 1 AS one
+         LIMIT 1
+
+         // --------------
+         // for every edge on this branch, delete it
+         // --------------
+         MATCH (s)-[r]->(d)
+         WHERE r.branch = $branch_name
+         CALL (r) {
+             DELETE r
          } IN TRANSACTIONS
 
-         // reduce results to a single row
-         WITH 1 AS one LIMIT 1
-
-         // find any orphaned vertices and delete them
-         MATCH (n)
-         WHERE NOT exists((n)--())
-         CALL (n) {
-             DELETE n
+         // --------------
+         // get the database IDs of every vertex linked to a deleted edge
+         // --------------
+         WITH DISTINCT elementId(s) AS s_id, elementId(d) AS d_id
+         WITH collect(s_id) + collect(d_id) AS vertex_ids
+         UNWIND vertex_ids AS vertex_id
+
+         // --------------
+         // delete any vertices that are now orphaned
+         // --------------
+         CALL (vertex_id) {
+             MATCH (n)
+             WHERE elementId(n) = vertex_id
+             AND NOT exists((n)--())
+             DELETE n
          } IN TRANSACTIONS
          """
          self.params["branch_name"] = self.branch_name
+         self.params["global_branch_name"] = GLOBAL_BRANCH_NAME
          self.add_to_query(query)
 
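
Note: `CALL { ... } IN TRANSACTIONS` commits the deletions in batches, but Neo4j only accepts it inside an implicit (auto-commit) transaction, and the `CALL (r) { ... }` variable-scope syntax requires a recent Neo4j 5 release. A minimal sketch of running such a query with the neo4j async driver (connection details are placeholders; this bypasses Infrahub's own database layer):

    from neo4j import AsyncGraphDatabase

    async def delete_branch_edges(uri: str, auth: tuple[str, str], branch_name: str) -> None:
        query = """
        MATCH (s)-[r]->(d)
        WHERE r.branch = $branch_name
        CALL (r) {
            DELETE r
        } IN TRANSACTIONS
        """
        async with AsyncGraphDatabase.driver(uri, auth=auth) as driver:
            async with driver.session() as session:
                # session.run() issues an auto-commit transaction,
                # which CALL ... IN TRANSACTIONS requires.
                await session.run(query, branch_name=branch_name)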
infrahub/core/query/node.py CHANGED
@@ -161,7 +161,22 @@ class NodeCreateAllQuery(NodeQuery):
          relationships: list[RelationshipCreateData] = []
          for rel_name in self.node._relationships:
              rel_manager: RelationshipManager = getattr(self.node, rel_name)
+             if rel_manager.schema.cardinality == "many":
+                 # Fetch all relationship peers through a single database call for performances.
+                 peers = await rel_manager.get_peers(db=db, branch_agnostic=self.branch_agnostic)
+
              for rel in rel_manager._relationships:
+                 if rel_manager.schema.cardinality == "many":
+                     try:
+                         rel.set_peer(value=peers[rel.get_peer_id()])
+                     except KeyError:
+                         pass
+                     except ValueError:
+                         # Relationship has not been initialized yet, it means the peer does not exist in db yet
+                         # typically because it will be allocated from a ressource pool. In that case, the peer
+                         # will be fetched using `rel.resolve` later.
+                         pass
+
                  rel_create_data = await rel.get_create_data(db=db, at=at)
                  if rel_create_data.peer_branch_level > deepest_branch_level or (
                      deepest_branch_name == GLOBAL_BRANCH_NAME and rel_create_data.peer_branch == registry.default_branch
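
Note: per the added comment, the intent is to avoid one database lookup per relationship when `get_create_data` later needs each peer: a single `get_peers` call returns a mapping keyed by peer id, and each relationship is primed from it. A minimal sketch of the same N+1 avoidance, with hypothetical names:

    async def attach_peers(rel_manager, db) -> None:
        peers = await rel_manager.get_peers(db=db)  # one round-trip, keyed by peer id
        for rel in rel_manager._relationships:
            peer = peers.get(rel.get_peer_id())
            if peer is not None:
                rel.set_peer(value=peer)  # get_create_data() can then reuse it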