infrahub-server 1.3.6__py3-none-any.whl → 1.3.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
infrahub/api/schema.py CHANGED
@@ -36,6 +36,7 @@ from infrahub.events import EventMeta
 from infrahub.events.schema_action import SchemaUpdatedEvent
 from infrahub.exceptions import MigrationError
 from infrahub.log import get_log_data, get_logger
+from infrahub.permissions import define_global_permission_from_branch
 from infrahub.types import ATTRIBUTE_PYTHON_TYPES
 from infrahub.worker import WORKER_IDENTITY
 from infrahub.workflows.catalogue import SCHEMA_APPLY_MIGRATION, SCHEMA_VALIDATE_MIGRATION
@@ -287,13 +288,8 @@ async def load_schema(
     context: InfrahubContext = Depends(get_context),
 ) -> SchemaUpdate:
     permission_manager.raise_for_permission(
-        permission=GlobalPermission(
-            action=GlobalPermissions.MANAGE_SCHEMA.value,
-            decision=(
-                PermissionDecision.ALLOW_DEFAULT
-                if branch.name in (GLOBAL_BRANCH_NAME, registry.default_branch)
-                else PermissionDecision.ALLOW_OTHER
-            ).value,
+        permission=define_global_permission_from_branch(
+            permission=GlobalPermissions.MANAGE_SCHEMA, branch_name=branch.name
         )
     )
 
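The inlined block removed above is exactly what the new `define_global_permission_from_branch` helper replaces. A minimal sketch of what such a helper could look like, assuming it only packages the same branch check; the import locations of `GlobalPermission` and `PermissionDecision` are assumptions, not taken from this diff:

    # Hedged sketch only: mirrors the inline logic removed above, not the actual
    # implementation shipped in infrahub.permissions.globals.
    from infrahub.core import registry
    from infrahub.core.constants import GLOBAL_BRANCH_NAME, GlobalPermissions, PermissionDecision  # assumed location
    from infrahub.core.account import GlobalPermission  # assumed location

    def define_global_permission_from_branch(permission: GlobalPermissions, branch_name: str) -> GlobalPermission:
        # Changes on the default or global branch get the "default" decision,
        # anything else is treated as an "other branch" decision.
        decision = (
            PermissionDecision.ALLOW_DEFAULT
            if branch_name in (GLOBAL_BRANCH_NAME, registry.default_branch)
            else PermissionDecision.ALLOW_OTHER
        )
        return GlobalPermission(action=permission.value, decision=decision.value)
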
infrahub/core/diff/model/path.py CHANGED
@@ -22,8 +22,6 @@ if TYPE_CHECKING:
     from neo4j.graph import Relationship as Neo4jRelationship
     from whenever import TimeDelta
 
-    from infrahub.graphql.initialization import GraphqlContext
-
 
 @dataclass
 class TimeRange:
@@ -314,12 +312,6 @@ class EnrichedDiffRelationship(BaseSummary):
     )
 
 
-@dataclass
-class ParentNodeInfo:
-    node: EnrichedDiffNode
-    relationship_name: str = "undefined"
-
-
 @dataclass
 class EnrichedDiffNode(BaseSummary):
     identifier: NodeIdentifier
@@ -364,37 +356,6 @@ class EnrichedDiffNode(BaseSummary):
             rel.clear_conflicts()
         self.conflict = None
 
-    def get_parent_info(self, graphql_context: GraphqlContext | None = None) -> ParentNodeInfo | None:
-        for r in self.relationships:
-            for n in r.nodes:
-                relationship_name: str = "undefined"
-
-                if not graphql_context:
-                    return ParentNodeInfo(node=n, relationship_name=relationship_name)
-
-                node_schema = graphql_context.db.schema.get(name=self.kind)
-                rel_schema = node_schema.get_relationship(name=r.name)
-
-                parent_schema = graphql_context.db.schema.get(name=n.kind)
-                rels_parent = parent_schema.get_relationships_by_identifier(id=rel_schema.get_identifier())
-
-                if rels_parent and len(rels_parent) == 1:
-                    relationship_name = rels_parent[0].name
-                elif rels_parent and len(rels_parent) > 1:
-                    for rel_parent in rels_parent:
-                        if (
-                            rel_schema.direction == RelationshipDirection.INBOUND
-                            and rel_parent.direction == RelationshipDirection.OUTBOUND
-                        ) or (
-                            rel_schema.direction == RelationshipDirection.OUTBOUND
-                            and rel_parent.direction == RelationshipDirection.INBOUND
-                        ):
-                            relationship_name = rel_parent.name
-                            break
-
-                return ParentNodeInfo(node=n, relationship_name=relationship_name)
-        return None
-
     def get_all_child_nodes(self) -> set[EnrichedDiffNode]:
         all_children = set()
         for r in self.relationships:
infrahub/core/graph/__init__.py CHANGED
@@ -1 +1 @@
-GRAPH_VERSION = 34
+GRAPH_VERSION = 35
infrahub/core/migrations/graph/__init__.py CHANGED
@@ -36,6 +36,7 @@ from .m031_check_number_attributes import Migration031
 from .m032_cleanup_orphaned_branch_relationships import Migration032
 from .m033_deduplicate_relationship_vertices import Migration033
 from .m034_find_orphaned_schema_fields import Migration034
+from .m035_orphan_relationships import Migration035
 
 if TYPE_CHECKING:
     from infrahub.core.root import Root
@@ -77,6 +78,7 @@ MIGRATIONS: list[type[GraphMigration | InternalSchemaMigration | ArbitraryMigrat
     Migration032,
     Migration033,
     Migration034,
+    Migration035,
 ]
 
 
infrahub/core/migrations/graph/m033_deduplicate_relationship_vertices.py CHANGED
@@ -89,7 +89,7 @@ class Migration033(GraphMigration):
     """
 
     name: str = "033_deduplicate_relationship_vertices"
-    minimum_version: int = 31
+    minimum_version: int = 32
     queries: Sequence[type[Query]] = [DeduplicateRelationshipVerticesQuery]
 
     async def validate_migration(self, db: InfrahubDatabase) -> MigrationResult:  # noqa: ARG002
infrahub/core/migrations/graph/m035_orphan_relationships.py ADDED
@@ -0,0 +1,43 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any, Sequence
+
+from infrahub.core.migrations.shared import GraphMigration, MigrationResult
+
+from ...query import Query, QueryType
+
+if TYPE_CHECKING:
+    from infrahub.database import InfrahubDatabase
+
+
+class CleanupOrphanedRelationshipsQuery(Query):
+    name = "cleanup_orphaned_relationships"
+    type = QueryType.WRITE
+    insert_return = False
+
+    async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:  # noqa: ARG002
+        query = """
+        MATCH (rel:Relationship)-[:IS_RELATED]-(peer:Node)
+        WITH DISTINCT rel, peer.uuid AS p_uuid
+        WITH rel, count(*) AS num_peers
+        WHERE num_peers < 2
+        DETACH DELETE rel
+        """
+        self.add_to_query(query)
+
+
+class Migration035(GraphMigration):
+    """
+    Remove Relationship vertices that only have a single peer
+    """
+
+    name: str = "035_clean_up_orphaned_relationships"
+    minimum_version: int = 34
+    queries: Sequence[type[Query]] = [CleanupOrphanedRelationshipsQuery]
+
+    async def validate_migration(self, db: InfrahubDatabase) -> MigrationResult:  # noqa: ARG002
+        return MigrationResult()
+
+    async def execute(self, db: InfrahubDatabase) -> MigrationResult:
+        # overrides parent class to skip transaction in case there are a lot of relationships to delete
+        return await self.do_execute(db=db)
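For readers unfamiliar with the Cypher above: the query groups the IS_RELATED edges of each Relationship vertex by distinct peer UUID and deletes any vertex left with fewer than two distinct peers. The same rule expressed over an in-memory edge list, purely as an illustration (not Infrahub code):

    # Illustration of the "fewer than two distinct peers" rule applied by Migration035.
    edges = [  # (relationship_vertex, peer_node_uuid) pairs standing in for IS_RELATED edges
        ("rel-1", "node-a"), ("rel-1", "node-b"),  # two distinct peers -> kept
        ("rel-2", "node-c"), ("rel-2", "node-c"),  # one distinct peer  -> deleted
        ("rel-3", "node-d"),                       # one distinct peer  -> deleted
    ]

    peers_by_rel: dict[str, set[str]] = {}
    for rel_id, peer_uuid in edges:
        peers_by_rel.setdefault(rel_id, set()).add(peer_uuid)

    orphaned = sorted(rel_id for rel_id, peers in peers_by_rel.items() if len(peers) < 2)
    print(orphaned)  # ['rel-2', 'rel-3'] would be DETACH DELETEd by the migration
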
infrahub/core/node/resource_manager/ip_address_pool.py CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
 import ipaddress
 from typing import TYPE_CHECKING, Any
 
+from infrahub import lock
 from infrahub.core import registry
 from infrahub.core.ipam.reconciler import IpamReconciler
 from infrahub.core.query.ipam import get_ip_addresses
@@ -33,54 +34,55 @@ class CoreIPAddressPool(Node):
         prefixlen: int | None = None,
         at: Timestamp | None = None,
     ) -> Node:
-        # Check if there is already a resource allocated with this identifier
-        # if not, pull all existing prefixes and allocated the next available
-
-        if identifier:
-            query_get = await IPAddressPoolGetReserved.init(db=db, pool_id=self.id, identifier=identifier)
-            await query_get.execute(db=db)
-            result = query_get.get_result()
-
-            if result:
-                address = result.get_node("address")
-                # TODO add support for branch, if the node is reserved with this id in another branch we should return an error
-                node = await registry.manager.get_one(db=db, id=address.get("uuid"), branch=branch)
-
-                if node:
-                    return node
-
-        data = data or {}
-
-        address_type = address_type or data.get("address_type") or self.default_address_type.value  # type: ignore[attr-defined]
-        if not address_type:
-            raise ValueError(
-                f"IPAddressPool: {self.name.value} | "  # type: ignore[attr-defined]
-                "An address_type or a default_value type must be provided to allocate a new IP address"
-            )
-
-        ip_namespace = await self.ip_namespace.get_peer(db=db)  # type: ignore[attr-defined]
-
-        prefixlen = prefixlen or data.get("prefixlen") or self.default_prefix_length.value  # type: ignore[attr-defined]
-
-        next_address = await self.get_next(db=db, prefixlen=prefixlen)
-
-        target_schema = registry.get_node_schema(name=address_type, branch=branch)
-        node = await Node.init(db=db, schema=target_schema, branch=branch, at=at)
-        try:
-            await node.new(db=db, address=str(next_address), ip_namespace=ip_namespace, **data)
-        except ValidationError as exc:
-            raise ValueError(f"IPAddressPool: {self.name.value} | {exc!s}") from exc  # type: ignore[attr-defined]
-        await node.save(db=db, at=at)
-        reconciler = IpamReconciler(db=db, branch=branch)
-        await reconciler.reconcile(ip_value=next_address, namespace=ip_namespace.id, node_uuid=node.get_id())
-
-        if identifier:
-            query_set = await IPAddressPoolSetReserved.init(
-                db=db, pool_id=self.id, identifier=identifier, address_id=node.id, at=at
-            )
-            await query_set.execute(db=db)
-
-        return node
+        async with lock.registry.get(name=self.get_id(), namespace="resource_pool"):
+            # Check if there is already a resource allocated with this identifier
+            # if not, pull all existing prefixes and allocated the next available
+
+            if identifier:
+                query_get = await IPAddressPoolGetReserved.init(db=db, pool_id=self.id, identifier=identifier)
+                await query_get.execute(db=db)
+                result = query_get.get_result()
+
+                if result:
+                    address = result.get_node("address")
+                    # TODO add support for branch, if the node is reserved with this id in another branch we should return an error
+                    node = await registry.manager.get_one(db=db, id=address.get("uuid"), branch=branch)
+
+                    if node:
+                        return node
+
+            data = data or {}
+
+            address_type = address_type or data.get("address_type") or self.default_address_type.value  # type: ignore[attr-defined]
+            if not address_type:
+                raise ValueError(
+                    f"IPAddressPool: {self.name.value} | "  # type: ignore[attr-defined]
+                    "An address_type or a default_value type must be provided to allocate a new IP address"
+                )
+
+            ip_namespace = await self.ip_namespace.get_peer(db=db)  # type: ignore[attr-defined]
+
+            prefixlen = prefixlen or data.get("prefixlen") or self.default_prefix_length.value  # type: ignore[attr-defined]
+
+            next_address = await self.get_next(db=db, prefixlen=prefixlen)
+
+            target_schema = registry.get_node_schema(name=address_type, branch=branch)
+            node = await Node.init(db=db, schema=target_schema, branch=branch, at=at)
+            try:
+                await node.new(db=db, address=str(next_address), ip_namespace=ip_namespace, **data)
+            except ValidationError as exc:
+                raise ValueError(f"IPAddressPool: {self.name.value} | {exc!s}") from exc  # type: ignore[attr-defined]
+            await node.save(db=db, at=at)
+            reconciler = IpamReconciler(db=db, branch=branch)
+            await reconciler.reconcile(ip_value=next_address, namespace=ip_namespace.id, node_uuid=node.get_id())
+
+            if identifier:
+                query_set = await IPAddressPoolSetReserved.init(
+                    db=db, pool_id=self.id, identifier=identifier, address_id=node.id, at=at
+                )
+                await query_set.execute(db=db)
+
+            return node
 
     async def get_next(self, db: InfrahubDatabase, prefixlen: int | None = None) -> IPAddressType:
        resources = await self.resources.get_peers(db=db)  # type: ignore[attr-defined]
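The allocation path in this pool (and in the prefix and number pools below) is now wrapped in a per-pool lock via `lock.registry.get(name=self.get_id(), namespace="resource_pool")`, so two concurrent `get_resource` calls against the same pool can no longer read the same "next available" value before either reservation is written. A minimal illustration of the pattern with a plain asyncio lock registry; this is not infrahub's lock implementation, only an assumption that it behaves like a named-lock registry (same name and namespace map to the same lock):

    # Sketch of the per-pool serialization pattern, using asyncio primitives only.
    import asyncio
    from collections import defaultdict

    class FakeLockRegistry:
        def __init__(self) -> None:
            self._locks: dict[tuple[str, str], asyncio.Lock] = defaultdict(asyncio.Lock)

        def get(self, name: str, namespace: str) -> asyncio.Lock:
            # Same (namespace, name) pair -> same lock object, like a named lock registry.
            return self._locks[(namespace, name)]

    lock_registry = FakeLockRegistry()

    async def get_resource(pool_id: str) -> None:
        async with lock_registry.get(name=pool_id, namespace="resource_pool"):
            # placeholder for: check reservation, compute next free value, save + reserve
            await asyncio.sleep(0)

    async def main() -> None:
        # Both calls target the same pool, so they now run strictly one after the other.
        await asyncio.gather(get_resource("pool-1"), get_resource("pool-1"))

    asyncio.run(main())
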
infrahub/core/node/resource_manager/ip_prefix_pool.py CHANGED
@@ -5,6 +5,7 @@ from typing import TYPE_CHECKING, Any
 
 from netaddr import IPSet
 
+from infrahub import lock
 from infrahub.core import registry
 from infrahub.core.ipam.reconciler import IpamReconciler
 from infrahub.core.query.ipam import get_subnets
@@ -36,59 +37,60 @@ class CoreIPPrefixPool(Node):
         prefix_type: str | None = None,
         at: Timestamp | None = None,
     ) -> Node:
-        # Check if there is already a resource allocated with this identifier
-        # if not, pull all existing prefixes and allocated the next available
-        if identifier:
-            query_get = await PrefixPoolGetReserved.init(db=db, pool_id=self.id, identifier=identifier)
-            await query_get.execute(db=db)
-            result = query_get.get_result()
-            if result:
-                prefix = result.get_node("prefix")
-                # TODO add support for branch, if the node is reserved with this id in another branch we should return an error
-                node = await registry.manager.get_one(db=db, id=prefix.get("uuid"), branch=branch)
-                if node:
-                    return node
-
-        ip_namespace = await self.ip_namespace.get_peer(db=db)  # type: ignore[attr-defined]
-
-        data = data or {}
-
-        prefixlen = prefixlen or data.get("prefixlen", None) or self.default_prefix_length.value  # type: ignore[attr-defined]
-        if not prefixlen:
-            raise ValueError(
-                f"IPPrefixPool: {self.name.value} | "  # type: ignore[attr-defined]
-                "A prefixlen or a default_value must be provided to allocate a new prefix"
-            )
-
-        next_prefix = await self.get_next(db=db, prefixlen=prefixlen)
-
-        prefix_type = prefix_type or data.get("prefix_type", None) or self.default_prefix_type.value  # type: ignore[attr-defined]
-        if not prefix_type:
-            raise ValueError(
-                f"IPPrefixPool: {self.name.value} | "  # type: ignore[attr-defined]
-                "A prefix_type or a default_value type must be provided to allocate a new prefix"
-            )
-
-        member_type = member_type or data.get("member_type", None) or self.default_member_type.value.value  # type: ignore[attr-defined]
-        data["member_type"] = member_type
-
-        target_schema = registry.get_node_schema(name=prefix_type, branch=branch)
-        node = await Node.init(db=db, schema=target_schema, branch=branch, at=at)
-        try:
-            await node.new(db=db, prefix=str(next_prefix), ip_namespace=ip_namespace, **data)
-        except ValidationError as exc:
-            raise ValueError(f"IPPrefixPool: {self.name.value} | {exc!s}") from exc  # type: ignore[attr-defined]
-        await node.save(db=db, at=at)
-        reconciler = IpamReconciler(db=db, branch=branch)
-        await reconciler.reconcile(ip_value=next_prefix, namespace=ip_namespace.id, node_uuid=node.get_id())
-
-        if identifier:
-            query_set = await PrefixPoolSetReserved.init(
-                db=db, pool_id=self.id, identifier=identifier, prefix_id=node.id, at=at
-            )
-            await query_set.execute(db=db)
-
-        return node
+        async with lock.registry.get(name=self.get_id(), namespace="resource_pool"):
+            # Check if there is already a resource allocated with this identifier
+            # if not, pull all existing prefixes and allocated the next available
+            if identifier:
+                query_get = await PrefixPoolGetReserved.init(db=db, pool_id=self.id, identifier=identifier)
+                await query_get.execute(db=db)
+                result = query_get.get_result()
+                if result:
+                    prefix = result.get_node("prefix")
+                    # TODO add support for branch, if the node is reserved with this id in another branch we should return an error
+                    node = await registry.manager.get_one(db=db, id=prefix.get("uuid"), branch=branch)
+                    if node:
+                        return node
+
+            ip_namespace = await self.ip_namespace.get_peer(db=db)  # type: ignore[attr-defined]
+
+            data = data or {}
+
+            prefixlen = prefixlen or data.get("prefixlen", None) or self.default_prefix_length.value  # type: ignore[attr-defined]
+            if not prefixlen:
+                raise ValueError(
+                    f"IPPrefixPool: {self.name.value} | "  # type: ignore[attr-defined]
+                    "A prefixlen or a default_value must be provided to allocate a new prefix"
+                )
+
+            next_prefix = await self.get_next(db=db, prefixlen=prefixlen)
+
+            prefix_type = prefix_type or data.get("prefix_type", None) or self.default_prefix_type.value  # type: ignore[attr-defined]
+            if not prefix_type:
+                raise ValueError(
+                    f"IPPrefixPool: {self.name.value} | "  # type: ignore[attr-defined]
+                    "A prefix_type or a default_value type must be provided to allocate a new prefix"
+                )
+
+            member_type = member_type or data.get("member_type", None) or self.default_member_type.value.value  # type: ignore[attr-defined]
+            data["member_type"] = member_type
+
+            target_schema = registry.get_node_schema(name=prefix_type, branch=branch)
+            node = await Node.init(db=db, schema=target_schema, branch=branch, at=at)
+            try:
+                await node.new(db=db, prefix=str(next_prefix), ip_namespace=ip_namespace, **data)
+            except ValidationError as exc:
+                raise ValueError(f"IPPrefixPool: {self.name.value} | {exc!s}") from exc  # type: ignore[attr-defined]
+            await node.save(db=db, at=at)
+            reconciler = IpamReconciler(db=db, branch=branch)
+            await reconciler.reconcile(ip_value=next_prefix, namespace=ip_namespace.id, node_uuid=node.get_id())
+
+            if identifier:
+                query_set = await PrefixPoolSetReserved.init(
+                    db=db, pool_id=self.id, identifier=identifier, prefix_id=node.id, at=at
+                )
+                await query_set.execute(db=db)
+
+            return node
 
     async def get_next(self, db: InfrahubDatabase, prefixlen: int) -> IPNetworkType:
        resources = await self.resources.get_peers(db=db)  # type: ignore[attr-defined]
infrahub/core/node/resource_manager/number_pool.py CHANGED
@@ -2,6 +2,7 @@ from __future__ import annotations
 
 from typing import TYPE_CHECKING
 
+from infrahub import lock
 from infrahub.core import registry
 from infrahub.core.query.resource_manager import NumberPoolGetReserved, NumberPoolGetUsed, NumberPoolSetReserved
 from infrahub.core.schema.attribute_parameters import NumberAttributeParameters
@@ -62,24 +63,25 @@ class CoreNumberPool(Node):
         identifier: str | None = None,
         at: Timestamp | None = None,
     ) -> int:
-        # NOTE: ideally we should use the HFID as the identifier (if available)
-        # one of the challenge with using the HFID is that it might change over time
-        # so we need to ensure that the identifier is stable, or we need to handle the case where the identifier changes
-        identifier = identifier or node.get_id()
-
-        # Check if there is already a resource allocated with this identifier
-        # if not, pull all existing number and allocate the next available
-        # TODO add support for branch, if the node is reserved with this id in another branch we should return an error
-        query_get = await NumberPoolGetReserved.init(db=db, branch=branch, pool_id=self.id, identifier=identifier)
-        await query_get.execute(db=db)
-        reservation = query_get.get_reservation()
-        if reservation is not None:
-            return reservation
-
-        # If we have not returned a value we need to find one if avaiable
-        number = await self.get_next(db=db, branch=branch, attribute=attribute)
-        await self.reserve(db=db, number=number, identifier=identifier, at=at)
-        return number
+        async with lock.registry.get(name=self.get_id(), namespace="resource_pool"):
+            # NOTE: ideally we should use the HFID as the identifier (if available)
+            # one of the challenge with using the HFID is that it might change over time
+            # so we need to ensure that the identifier is stable, or we need to handle the case where the identifier changes
+            identifier = identifier or node.get_id()
+
+            # Check if there is already a resource allocated with this identifier
+            # if not, pull all existing number and allocate the next available
+            # TODO add support for branch, if the node is reserved with this id in another branch we should return an error
+            query_get = await NumberPoolGetReserved.init(db=db, branch=branch, pool_id=self.id, identifier=identifier)
+            await query_get.execute(db=db)
+            reservation = query_get.get_reservation()
+            if reservation is not None:
+                return reservation
+
+            # If we have not returned a value we need to find one if avaiable
+            number = await self.get_next(db=db, branch=branch, attribute=attribute)
+            await self.reserve(db=db, number=number, identifier=identifier, at=at)
+            return number
 
     async def get_next(self, db: InfrahubDatabase, branch: Branch, attribute: AttributeSchema) -> int:
        taken = await self.get_used(db=db, branch=branch)
infrahub/core/query/branch.py CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
 from typing import TYPE_CHECKING, Any
 
 from infrahub import config
+from infrahub.core.constants import GLOBAL_BRANCH_NAME
 from infrahub.core.query import Query, QueryType
 
 if TYPE_CHECKING:
@@ -21,33 +22,49 @@ class DeleteBranchRelationshipsQuery(Query):
 
     async def query_init(self, db: InfrahubDatabase, **kwargs: Any) -> None:  # noqa: ARG002
         query = """
-        // delete all relationships on this branch
-        MATCH (s)-[r1]-(d)
-        WHERE r1.branch = $branch_name
-        CALL (r1) {
-            DELETE r1
+        // --------------
+        // for every Node created on this branch (it's about to be deleted), find any agnostic relationships
+        // connected to the Node and delete them
+        // --------------
+        OPTIONAL MATCH (:Root)<-[e:IS_PART_OF {status: "active"}]-(n:Node)
+        WHERE e.branch = $branch_name
+        CALL (n) {
+            OPTIONAL MATCH (n)-[:IS_RELATED {branch: $global_branch_name}]-(rel:Relationship)
+            DETACH DELETE rel
         } IN TRANSACTIONS
 
-        // check for any orphaned Node vertices and delete them
-        WITH collect(DISTINCT s.uuid) + collect(DISTINCT d.uuid) AS nodes_uuids
-        MATCH (s2:Node)-[r2]-(d2)
-        WHERE NOT exists((s2)-[:IS_PART_OF]-(:Root))
-        AND s2.uuid IN nodes_uuids
-        CALL (r2) {
-            DELETE r2
+        // reduce the results to a single row
+        WITH 1 AS one
+        LIMIT 1
+
+        // --------------
+        // for every edge on this branch, delete it
+        // --------------
+        MATCH (s)-[r]->(d)
+        WHERE r.branch = $branch_name
+        CALL (r) {
+            DELETE r
         } IN TRANSACTIONS
 
-        // reduce results to a single row
-        WITH 1 AS one LIMIT 1
-
-        // find any orphaned vertices and delete them
-        MATCH (n)
-        WHERE NOT exists((n)--())
-        CALL (n) {
-            DELETE n
+        // --------------
+        // get the database IDs of every vertex linked to a deleted edge
+        // --------------
+        WITH DISTINCT elementId(s) AS s_id, elementId(d) AS d_id
+        WITH collect(s_id) + collect(d_id) AS vertex_ids
+        UNWIND vertex_ids AS vertex_id
+
+        // --------------
+        // delete any vertices that are now orphaned
+        // --------------
+        CALL (vertex_id) {
+            MATCH (n)
+            WHERE elementId(n) = vertex_id
+            AND NOT exists((n)--())
+            DELETE n
         } IN TRANSACTIONS
         """
         self.params["branch_name"] = self.branch_name
+        self.params["global_branch_name"] = GLOBAL_BRANCH_NAME
         self.add_to_query(query)
 
 
infrahub/core/query/node.py CHANGED
@@ -157,19 +157,21 @@ class NodeCreateAllQuery(NodeQuery):
         relationships: list[RelationshipCreateData] = []
         for rel_name in self.node._relationships:
             rel_manager: RelationshipManager = getattr(self.node, rel_name)
-            # Fetch all relationship peers through a single database call for performances.
-            peers = await rel_manager.get_peers(db=db, branch_agnostic=self.branch_agnostic)
+            if rel_manager.schema.cardinality == "many":
+                # Fetch all relationship peers through a single database call for performances.
+                peers = await rel_manager.get_peers(db=db, branch_agnostic=self.branch_agnostic)
 
             for rel in rel_manager._relationships:
-                try:
-                    rel.set_peer(value=peers[rel.get_peer_id()])
-                except KeyError:
-                    pass
-                except ValueError:
-                    # Relationship has not been initialized yet, it means the peer does not exist in db yet
-                    # typically because it will be allocated from a ressource pool. In that case, the peer
-                    # will be fetched using `rel.resolve` later.
-                    pass
+                if rel_manager.schema.cardinality == "many":
+                    try:
+                        rel.set_peer(value=peers[rel.get_peer_id()])
+                    except KeyError:
+                        pass
+                    except ValueError:
+                        # Relationship has not been initialized yet, it means the peer does not exist in db yet
+                        # typically because it will be allocated from a ressource pool. In that case, the peer
+                        # will be fetched using `rel.resolve` later.
+                        pass
 
                 rel_create_data = await rel.get_create_data(db=db, at=at)
                 if rel_create_data.peer_branch_level > deepest_branch_level or (
infrahub/core/schema/node_schema.py CHANGED
@@ -53,11 +53,6 @@ class NodeSchema(GeneratedNodeSchema):
                     f"{self.kind}.{attribute.name} inherited from {interface.namespace}{interface.name} must be the same kind "
                     f'["{interface_attr.kind}", "{attribute.kind}"]'
                 )
-            if attribute.optional != interface_attr.optional:
-                raise ValueError(
-                    f"{self.kind}.{attribute.name} inherited from {interface.namespace}{interface.name} must have the same value for property "
-                    f'"optional" ["{interface_attr.optional}", "{attribute.optional}"]'
-                )
 
         for relationship in self.relationships:
             if (
infrahub/core/schema/schema_branch.py CHANGED
@@ -226,8 +226,8 @@ class SchemaBranch:
     def update(self, schema: SchemaBranch) -> None:
         """Update another SchemaBranch into this one."""
 
-        local_kinds = list(self.nodes.keys()) + list(self.generics.keys())
-        other_kinds = list(schema.nodes.keys()) + list(schema.generics.keys())
+        local_kinds = self.all_names
+        other_kinds = schema.all_names
 
         in_both, _, other_only = compare_lists(list1=local_kinds, list2=other_kinds)
 
infrahub/graphql/app.py CHANGED
@@ -231,6 +231,7 @@ class InfrahubGraphQLApp:
                 operation_name=operation_name,
                 branch=branch,
             )
+            impacted_models = analyzed_query.query_report.impacted_models
 
             await self._evaluate_permissions(
                 db=db,
@@ -282,7 +283,7 @@ class InfrahubGraphQLApp:
             GRAPHQL_QUERY_HEIGHT_METRICS.labels(**labels).observe(await analyzed_query.calculate_height())
             # GRAPHQL_QUERY_VARS_METRICS.labels(**labels).observe(len(analyzed_query.variables))
             GRAPHQL_TOP_LEVEL_QUERIES_METRICS.labels(**labels).observe(analyzed_query.nbr_queries)
-            GRAPHQL_QUERY_OBJECTS_METRICS.labels(**labels).observe(len(analyzed_query.query_report.impacted_models))
+            GRAPHQL_QUERY_OBJECTS_METRICS.labels(**labels).observe(len(impacted_models))
 
             _, errors = analyzed_query.is_valid
             if errors:
infrahub/graphql/context.py CHANGED
@@ -5,7 +5,7 @@ from typing import TYPE_CHECKING
 from infrahub.core.constants import GlobalPermissions, InfrahubKind
 from infrahub.core.manager import NodeManager
 from infrahub.exceptions import NodeNotFoundError, ValidationError
-from infrahub.permissions.globals import define_global_permission_from_branch
+from infrahub.permissions import define_global_permission_from_branch
 
 if TYPE_CHECKING:
     from .initialization import GraphqlContext
infrahub/graphql/mutations/schema.py CHANGED
@@ -6,7 +6,7 @@ from graphene import Boolean, Field, InputObjectType, Mutation, String
 
 from infrahub import lock
 from infrahub.core import registry
-from infrahub.core.constants import RESTRICTED_NAMESPACES
+from infrahub.core.constants import RESTRICTED_NAMESPACES, GlobalPermissions
 from infrahub.core.manager import NodeManager
 from infrahub.core.schema import DropdownChoice, GenericSchema, NodeSchema
 from infrahub.database import InfrahubDatabase, retry_db_transaction
@@ -16,6 +16,7 @@ from infrahub.exceptions import ValidationError
 from infrahub.graphql.context import apply_external_context
 from infrahub.graphql.types.context import ContextInput
 from infrahub.log import get_log_data, get_logger
+from infrahub.permissions import define_global_permission_from_branch
 from infrahub.worker import WORKER_IDENTITY
 
 from ..types import DropdownFields
@@ -32,6 +33,14 @@ if TYPE_CHECKING:
 log = get_logger()
 
 
+def _validate_schema_permission(graphql_context: GraphqlContext) -> None:
+    graphql_context.active_permissions.raise_for_permission(
+        permission=define_global_permission_from_branch(
+            permission=GlobalPermissions.MANAGE_SCHEMA, branch_name=graphql_context.branch.name
+        )
+    )
+
+
 class SchemaEnumInput(InputObjectType):
     kind = String(required=True)
     attribute = String(required=True)
@@ -69,6 +78,7 @@ class SchemaDropdownAdd(Mutation):
     ) -> Self:
         graphql_context: GraphqlContext = info.context
 
+        _validate_schema_permission(graphql_context=graphql_context)
         await apply_external_context(graphql_context=graphql_context, context_input=context)
 
         kind = graphql_context.db.schema.get(name=str(data.kind), branch=graphql_context.branch.name)
@@ -130,6 +140,7 @@ class SchemaDropdownRemove(Mutation):
     ) -> dict[str, bool]:
         graphql_context: GraphqlContext = info.context
 
+        _validate_schema_permission(graphql_context=graphql_context)
         kind = graphql_context.db.schema.get(name=str(data.kind), branch=graphql_context.branch.name)
         await apply_external_context(graphql_context=graphql_context, context_input=context)
 
@@ -185,6 +196,7 @@ class SchemaEnumAdd(Mutation):
     ) -> dict[str, bool]:
         graphql_context: GraphqlContext = info.context
 
+        _validate_schema_permission(graphql_context=graphql_context)
         kind = graphql_context.db.schema.get(name=str(data.kind), branch=graphql_context.branch.name)
         await apply_external_context(graphql_context=graphql_context, context_input=context)
 
@@ -230,6 +242,7 @@ class SchemaEnumRemove(Mutation):
     ) -> dict[str, bool]:
         graphql_context: GraphqlContext = info.context
 
+        _validate_schema_permission(graphql_context=graphql_context)
         kind = graphql_context.db.schema.get(name=str(data.kind), branch=graphql_context.branch.name)
         await apply_external_context(graphql_context=graphql_context, context_input=context)
 
infrahub/graphql/queries/diff/tree.py CHANGED
@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+from dataclasses import dataclass
 from typing import TYPE_CHECKING, Any
 
 from graphene import Argument, Boolean, DateTime, Field, InputObjectType, Int, List, NonNull, ObjectType, String
@@ -7,7 +8,7 @@ from graphene import Enum as GrapheneEnum
 from infrahub_sdk.utils import extract_fields
 
 from infrahub.core import registry
-from infrahub.core.constants import DiffAction, RelationshipCardinality
+from infrahub.core.constants import DiffAction, RelationshipCardinality, RelationshipDirection
 from infrahub.core.constants.database import DatabaseEdgeType
 from infrahub.core.diff.model.path import NameTrackingId
 from infrahub.core.diff.query.filters import EnrichedDiffQueryFilters
@@ -38,6 +39,12 @@ GrapheneDiffActionEnum = GrapheneEnum.from_enum(DiffAction)
 GrapheneCardinalityEnum = GrapheneEnum.from_enum(RelationshipCardinality)
 
 
+@dataclass
+class ParentNodeInfo:
+    node: EnrichedDiffNode
+    relationship_name: str
+
+
 class ConflictDetails(ObjectType):
     uuid = String(required=True)
     base_branch_action = Field(GrapheneDiffActionEnum, required=True)
@@ -145,9 +152,16 @@ class DiffTreeSummary(DiffSummaryCounts):
 
 
 class DiffTreeResolver:
+    def __init__(self) -> None:
+        self.source_branch_name: str | None = None
+
+    def initialize(self, enriched_diff_root: EnrichedDiffRoot) -> None:
+        self.source_branch_name = enriched_diff_root.diff_branch_name
+
     async def to_diff_tree(
         self, enriched_diff_root: EnrichedDiffRoot, graphql_context: GraphqlContext | None = None
     ) -> DiffTree:
+        self.initialize(enriched_diff_root=enriched_diff_root)
         all_nodes = list(enriched_diff_root.nodes)
         tree_nodes = [self.to_diff_node(enriched_node=e_node, graphql_context=graphql_context) for e_node in all_nodes]
         name = None
@@ -166,6 +180,43 @@ class DiffTreeResolver:
             num_conflicts=enriched_diff_root.num_conflicts,
         )
 
+    def _get_parent_info(
+        self, diff_node: EnrichedDiffNode, graphql_context: GraphqlContext | None = None
+    ) -> ParentNodeInfo | None:
+        for r in diff_node.relationships:
+            for n in r.nodes:
+                relationship_name: str = "undefined"
+
+                if not graphql_context or not self.source_branch_name:
+                    return ParentNodeInfo(node=n, relationship_name=relationship_name)
+
+                node_schema = graphql_context.db.schema.get(
+                    name=diff_node.kind, branch=self.source_branch_name, duplicate=False
+                )
+                rel_schema = node_schema.get_relationship(name=r.name)
+
+                parent_schema = graphql_context.db.schema.get(
+                    name=n.kind, branch=self.source_branch_name, duplicate=False
+                )
+                rels_parent = parent_schema.get_relationships_by_identifier(id=rel_schema.get_identifier())
+
+                if rels_parent and len(rels_parent) == 1:
+                    relationship_name = rels_parent[0].name
+                elif rels_parent and len(rels_parent) > 1:
+                    for rel_parent in rels_parent:
+                        if (
+                            rel_schema.direction == RelationshipDirection.INBOUND
+                            and rel_parent.direction == RelationshipDirection.OUTBOUND
+                        ) or (
+                            rel_schema.direction == RelationshipDirection.OUTBOUND
+                            and rel_parent.direction == RelationshipDirection.INBOUND
+                        ):
+                            relationship_name = rel_parent.name
+                            break
+
+                return ParentNodeInfo(node=n, relationship_name=relationship_name)
+        return None
+
     def to_diff_node(self, enriched_node: EnrichedDiffNode, graphql_context: GraphqlContext | None = None) -> DiffNode:
         diff_attributes = [
             self.to_diff_attribute(enriched_attribute=e_attr, graphql_context=graphql_context)
@@ -181,7 +232,7 @@ class DiffTreeResolver:
             conflict = self.to_diff_conflict(enriched_conflict=enriched_node.conflict, graphql_context=graphql_context)
 
         parent = None
-        if parent_info := enriched_node.get_parent_info(graphql_context=graphql_context):
+        if parent_info := self._get_parent_info(diff_node=enriched_node, graphql_context=graphql_context):
             parent = DiffNodeParent(
                 uuid=parent_info.node.uuid,
                 kind=parent_info.node.kind,
infrahub/graphql/schema.py CHANGED
@@ -21,21 +21,10 @@ from .mutations.diff import DiffUpdateMutation
 from .mutations.diff_conflict import ResolveDiffConflict
 from .mutations.generator import GeneratorDefinitionRequestRun
 from .mutations.proposed_change import ProposedChangeMerge, ProposedChangeRequestRunCheck
-from .mutations.relationship import (
-    RelationshipAdd,
-    RelationshipRemove,
-)
-from .mutations.repository import (
-    ProcessRepository,
-    ValidateRepositoryConnectivity,
-)
+from .mutations.relationship import RelationshipAdd, RelationshipRemove
+from .mutations.repository import ProcessRepository, ValidateRepositoryConnectivity
 from .mutations.resource_manager import IPAddressPoolGetResource, IPPrefixPoolGetResource
-from .mutations.schema import (
-    SchemaDropdownAdd,
-    SchemaDropdownRemove,
-    SchemaEnumAdd,
-    SchemaEnumRemove,
-)
+from .mutations.schema import SchemaDropdownAdd, SchemaDropdownRemove, SchemaEnumAdd, SchemaEnumRemove
 from .queries import (
     AccountPermissions,
     AccountToken,
infrahub/permissions/__init__.py CHANGED
@@ -1,4 +1,5 @@
 from infrahub.permissions.backend import PermissionBackend
+from infrahub.permissions.globals import define_global_permission_from_branch
 from infrahub.permissions.local_backend import LocalPermissionBackend
 from infrahub.permissions.manager import PermissionManager
 from infrahub.permissions.report import report_schema_permissions
@@ -9,6 +10,7 @@ __all__ = [
     "LocalPermissionBackend",
     "PermissionBackend",
     "PermissionManager",
+    "define_global_permission_from_branch",
    "get_global_permission_for_kind",
    "report_schema_permissions",
 ]
infrahub_sdk/utils.py CHANGED
@@ -142,14 +142,18 @@ def deep_merge_dict(dicta: dict, dictb: dict, path: list | None = None) -> dict:
     if path is None:
         path = []
     for key in dictb:
+        b_val = dictb[key]
         if key in dicta:
-            if isinstance(dicta[key], dict) and isinstance(dictb[key], dict):
-                deep_merge_dict(dicta[key], dictb[key], path + [str(key)])
-            elif isinstance(dicta[key], list) and isinstance(dictb[key], list):
+            a_val = dicta[key]
+            if isinstance(a_val, dict) and isinstance(b_val, dict):
+                deep_merge_dict(a_val, b_val, path + [str(key)])
+            elif isinstance(a_val, list) and isinstance(b_val, list):
                 # Merge lists
                 # Cannot use compare_list because list of dicts won't work (dict not hashable)
-                dicta[key] = [i for i in dicta[key] if i not in dictb[key]] + dictb[key]
-            elif dicta[key] == dictb[key]:
+                dicta[key] = [i for i in a_val if i not in b_val] + b_val
+            elif a_val is None and b_val is not None:
+                dicta[key] = b_val
+            elif a_val == b_val or (a_val is not None and b_val is None):
                 continue
             else:
                 raise ValueError("Conflict at %s" % ".".join(path + [str(key)]))
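With the change above, a `None` on either side of the merge is treated as "no value": it is overridden by a concrete value from the other dict, and a concrete value is kept when the other side is `None`, instead of raising a conflict. A quick usage example based on the code shown:

    from infrahub_sdk.utils import deep_merge_dict

    dicta = {"name": None, "tags": ["red"], "nested": {"size": 10}}
    dictb = {"name": "atl1-edge1", "tags": ["blue"], "nested": {"size": None}}

    deep_merge_dict(dicta, dictb)  # merges dictb into dicta in place
    print(dicta)
    # {'name': 'atl1-edge1', 'tags': ['red', 'blue'], 'nested': {'size': 10}}
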
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: infrahub-server
-Version: 1.3.6
+Version: 1.3.8
 Summary: Infrahub is taking a new approach to Infrastructure Management by providing a new generation of datastore to organize and control all the data that defines how an infrastructure should run.
 License: Apache-2.0
 Author: OpsMill
@@ -22,7 +22,7 @@ infrahub/api/menu.py,sha256=xp5bj5JXQZA6ZEPWoTSGGSfTXZ1sVmehMxr3VSG7FlQ,1216
22
22
  infrahub/api/oauth2.py,sha256=wFsWrwfyoNBC1JYzbt1nzU-OjjxWPARIBbE_14jzFmI,5493
23
23
  infrahub/api/oidc.py,sha256=3fU-fNOoMkqEzoLuTmlhCVaZvL6M3sAub8RP1_LvCO8,8299
24
24
  infrahub/api/query.py,sha256=6I95AxNS9O8aIopfObk9hYxlZHawRqESOKjjEDax6-g,7312
25
- infrahub/api/schema.py,sha256=VOgQBxWsYdhS8Qv9Y9UU350GpiJk4NKcYXwMEc7kvQM,17563
25
+ infrahub/api/schema.py,sha256=dUSB51YXaWELZuXYI7UNemd60MJPPBv4m6-MexXOE0k,17450
26
26
  infrahub/api/static/redoc.standalone.js,sha256=77kGx7mVN9EcdER2ZM4gQ-E-ra_N6AZq9QseAeD6kt0,1042008
27
27
  infrahub/api/static/swagger-ui-bundle.js,sha256=wuSp7wgUSDn_R8FCAgY-z-TlnnCk5xVKJr1Q2IDIi6E,1452753
28
28
  infrahub/api/static/swagger-ui.css,sha256=QBcPDuhZ0X-SExunBzKaiKBw5PZodNETZemnfSMvYRc,152071
@@ -108,7 +108,7 @@ infrahub/core/diff/merger/serializer.py,sha256=N5BJ5I2NkB5RtEMuhDfScM7v19PGCbuXC
108
108
  infrahub/core/diff/model/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
109
109
  infrahub/core/diff/model/diff.py,sha256=eS2TjZf9aWTZDvQ479t6C6iXXPtmBqulxA2jWxnEDuU,9501
110
110
  infrahub/core/diff/model/field_specifiers_map.py,sha256=59zsMRuYyb9OJEpH9BZQ9kf2DUiDOM3VtuDGvSK6dJQ,2632
111
- infrahub/core/diff/model/path.py,sha256=xPDyDJni_LMoxcZjsge2XorpGZd1WHZztK0OkFj8e2g,30551
111
+ infrahub/core/diff/model/path.py,sha256=lcKTS7hKvu3Rul0k_vxJTrz7G83HhPPedXlthKkr9ag,28844
112
112
  infrahub/core/diff/models.py,sha256=wmOzW4xQ5YreDCr_i56YMFtxbM4-LRgZort49fGJ0BQ,441
113
113
  infrahub/core/diff/parent_node_adder.py,sha256=AFq2KJHGgUVem4WCg-9Qi9h6TTwt-JID1uGYGBrIZxQ,2840
114
114
  infrahub/core/diff/payload_builder.py,sha256=5R_QuPM5P_uQONmTDbtpIjhshs_OJCcXLnVYjWw-78Q,2094
@@ -137,7 +137,7 @@ infrahub/core/diff/repository/deserializer.py,sha256=bhN9ao8HxqKyRz273QGLNV9z9_S
137
137
  infrahub/core/diff/repository/repository.py,sha256=x3QP9VmBVYBOVtf3IZUyzXqCd8sSfmHTqVoYlAOdGao,26006
138
138
  infrahub/core/diff/tasks.py,sha256=7_k-ZNcJZsiDp-xCZvCQfPJjg0xRxpaGTiVVNuRPfBI,3322
139
139
  infrahub/core/enums.py,sha256=qGbhRVoH43Xi0iDkUfWdQiKapJbLT9UKsCobFk_paIk,491
140
- infrahub/core/graph/__init__.py,sha256=lyd7EugUiWNJzdt5RSzKy4LqMNRIgubLAbWyov62LMk,19
140
+ infrahub/core/graph/__init__.py,sha256=p_rrdGMj53zA-RqJP0K9pTThqOxMPg80Yqgor2gc7iA,19
141
141
  infrahub/core/graph/constraints.py,sha256=lmuzrKDFoeSKRiLtycB9PXi6zhMYghczKrPYvfWyy90,10396
142
142
  infrahub/core/graph/index.py,sha256=IHLP-zPRp7HJYLGHMRDRXQp8RC69ztP10Tr5NcL2j4Y,1736
143
143
  infrahub/core/graph/schema.py,sha256=FmEPPb1XOFv3nnS_XJCuUqlp8HsStX5A2frHjlhoqvE,10105
@@ -156,7 +156,7 @@ infrahub/core/ipam/utilization.py,sha256=d-zpXCaWsHgJxBLopCDd7y4sJYvHcIzzpYhbTMI
156
156
  infrahub/core/manager.py,sha256=xMXPwlaGNnghkRUW0ILwJAUlBQJZqo9cGp9GVyqkqYk,47564
157
157
  infrahub/core/merge.py,sha256=2TiPC3fAHkhZCl8RARPzLj_Us47OBGHAp6txgCbWopU,11238
158
158
  infrahub/core/migrations/__init__.py,sha256=syPb3-Irf11dXCHgbT0UdmTnEBbpf4wXJ3m8ADYXDpk,1175
159
- infrahub/core/migrations/graph/__init__.py,sha256=e1lazHQy_gJHb1LdwALeUSTn8-ErZjEIcgeFut3LWtU,3885
159
+ infrahub/core/migrations/graph/__init__.py,sha256=OR4HdUUFQ1nmbTqkagC3BDSYbEtfy29YBgreEVzx5dg,3955
160
160
  infrahub/core/migrations/graph/m001_add_version_to_graph.py,sha256=YcLN6cFjE6IGheXR4Ujb6CcyY8bJ7WE289hcKJaENOc,1515
161
161
  infrahub/core/migrations/graph/m002_attribute_is_default.py,sha256=wB6f2N_ChTvGajqHD-OWCG5ahRMDhhXZuwo79ieq_II,1036
162
162
  infrahub/core/migrations/graph/m003_relationship_parent_optional.py,sha256=Aya-s98XfE9C7YluOwEjilwgnjaBnZxp27w_Xdv_NmU,2330
@@ -189,8 +189,9 @@ infrahub/core/migrations/graph/m029_duplicates_cleanup.py,sha256=DpOwTMzkdi9-kha
189
189
  infrahub/core/migrations/graph/m030_illegal_edges.py,sha256=Saz7QmUqwuLiBtSBdQf54E1Bj3hz0k9KAOQ-pwPBH4g,2797
190
190
  infrahub/core/migrations/graph/m031_check_number_attributes.py,sha256=s3sVoKIkrZAMVZtWWH8baJW42UCAePp5nMUKy5FDSiM,4944
191
191
  infrahub/core/migrations/graph/m032_cleanup_orphaned_branch_relationships.py,sha256=AEc91iCtHWsNvhSuqZGLAn7wL5FWhiqM73OSwIeB7_0,3535
192
- infrahub/core/migrations/graph/m033_deduplicate_relationship_vertices.py,sha256=EHsNyYEPYzqMybgrMefvE9tw-WUWmnh9ZF8FMVRl2wQ,3735
192
+ infrahub/core/migrations/graph/m033_deduplicate_relationship_vertices.py,sha256=YJ0XtOMdfjGPHWtzlMXIm3dX405cTdOoynUFztXVMQI,3735
193
193
  infrahub/core/migrations/graph/m034_find_orphaned_schema_fields.py,sha256=FekohfsamyLNzGBeRBiZML94tz2fUcvTzttfv6mD1cw,3547
194
+ infrahub/core/migrations/graph/m035_orphan_relationships.py,sha256=K0J5gzFF5gY-QMom0tRGDckqw19aN0uSV8AZ8KdKSMo,1371
194
195
  infrahub/core/migrations/query/__init__.py,sha256=JoWOUWlV6IzwxWxObsfCnAAKUOHJkE7dZlOsfB64ZEo,876
195
196
  infrahub/core/migrations/query/attribute_add.py,sha256=LlhkIfVOR3TFSUJEV_4kU5JBKXsWwTsRiX1ySUPe4TU,3655
196
197
  infrahub/core/migrations/query/attribute_rename.py,sha256=onb9Nanht1Tz47JgneAcFsuhqqvPS6dvI2nNjRupLLo,6892
@@ -220,9 +221,9 @@ infrahub/core/node/delete_validator.py,sha256=mj_HQXkTeP_A3po65-R5bCJnDM9CmFFmcU
220
221
  infrahub/core/node/ipam.py,sha256=NWb3TUlVQOGAzq1VvDwISLh61HML0jnalsJ7QojqGwQ,2669
221
222
  infrahub/core/node/permissions.py,sha256=uQzQ62IHcSly6fzPre0nQzlrkCIKzH4HyQkODKB3ZWM,2207
222
223
  infrahub/core/node/resource_manager/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
223
- infrahub/core/node/resource_manager/ip_address_pool.py,sha256=i7N6zEsvJQr1GUi9AH2Cj5HrrII04NNNwd15fgfGSw0,4939
224
- infrahub/core/node/resource_manager/ip_prefix_pool.py,sha256=B-9lyqLlVsgDHIEvs9MP3-xb_GqMuF-khdCmRbGVjU4,5173
225
- infrahub/core/node/resource_manager/number_pool.py,sha256=WjBttd7NPXtmt2kWJQjleZYx6TPXKLHm7oaV3JEzK44,5587
224
+ infrahub/core/node/resource_manager/ip_address_pool.py,sha256=NPCVLTHLdG6BZTJ7MJkqlGCRMkr6V2PyL91ItuoNqpQ,5198
225
+ infrahub/core/node/resource_manager/ip_prefix_pool.py,sha256=SfP0i1aM5tTPU7wKmij4fwzGtmDPeg0s9LRfKMaANOU,5460
226
+ infrahub/core/node/resource_manager/number_pool.py,sha256=QbPIbBJSGy8Et3PlsRXp7ToFZLO2bpQPk1P4PRtunLk,5762
226
227
  infrahub/core/node/standard.py,sha256=gvAY-1UWj4lUc8tqVZ8AqOFhCR5rhR--gI25g5AOD8o,7284
227
228
  infrahub/core/path.py,sha256=CTSnW6OcvnGNqTcOUZcVOMDSB4PLmeGYpY9U84uv9r8,6181
228
229
  infrahub/core/property.py,sha256=rwsqeaIvCMkHfJYl4WfsNPAS7KS0POo5rAN7vAprXGA,5102
@@ -230,11 +231,11 @@ infrahub/core/protocols.py,sha256=BDXKAT4QxMbPFnuRqIdhGJB8VY5jPpCkqdGK_li9fFU,12
230
231
  infrahub/core/protocols_base.py,sha256=cEi6giHtEUmaD0JWfDfWHJhEv_6wjaBA3oJRJCbvc6Q,3411
231
232
  infrahub/core/query/__init__.py,sha256=2qIMaODLwJ6pK6BUd5vODTlA15Aecf5I8_-J44UlCso,23089
232
233
  infrahub/core/query/attribute.py,sha256=DzwbElgTaZs6-nBYGmnDpBr9n0lmUPK3p7eyI30Snh8,11783
233
- infrahub/core/query/branch.py,sha256=B3QEqpwbJrs_8juWQPaHrdwLNJR-1tSkvMuixCFFdt4,3680
234
+ infrahub/core/query/branch.py,sha256=7gj83jDWPWjFUZud7lMQ0xwl9ag3FL-ZOlmY5Kuq7UU,4307
234
235
  infrahub/core/query/delete.py,sha256=7tPP1qtNV6QGYtmgE1RKsuQ9oxENnMTVkttLvJ2PiKg,1927
235
236
  infrahub/core/query/diff.py,sha256=Dc70L5u1wokt106g84QNFJdKhnTTCxmCgAGsBilCgEo,36514
236
237
  infrahub/core/query/ipam.py,sha256=0glfVQmcKqMvNyK4GU_zRl2O9pjl7JBeavyE8VC-De4,28234
237
- infrahub/core/query/node.py,sha256=HXOeT14vCsgpKHp76k-V_VMw7uvVJzFuWY2qBCFZGZk,68317
238
+ infrahub/core/query/node.py,sha256=zOuuMnGhvoOAcePo77gw--15xx56iIROwbuOXkAnpp8,68479
238
239
  infrahub/core/query/relationship.py,sha256=KmS9zrcr-RViXxiITXOjq1t0s-AfsICHk3wyyirZBfA,47817
239
240
  infrahub/core/query/resource_manager.py,sha256=NpxHVayh-HleoRz-bk54z2_PuBHBdU7ng3pCqKWObbo,16584
240
241
  infrahub/core/query/standard_node.py,sha256=mPBXyqk4RzoWRUX4NoojoVi8zk-sJ03GmzmUaWqOgSI,4825
@@ -293,10 +294,10 @@ infrahub/core/schema/generated/node_schema.py,sha256=PMgbQX1PC5ixQsjOFw_bcEfa4tx
293
294
  infrahub/core/schema/generated/relationship_schema.py,sha256=F198_LNmQRV0xSEBPRA3vBAioEItpYZVNApOmdb8_E4,5851
294
295
  infrahub/core/schema/generic_schema.py,sha256=4qXhCm4G_MgDqxZOut_AJwatU4onXBECKeS1UZcusr8,1340
295
296
  infrahub/core/schema/manager.py,sha256=Vz6EJo8pDq9u5apRU7wgFMtcsCHDEt9BHwS0VRlctAc,32776
296
- infrahub/core/schema/node_schema.py,sha256=ld_Wrqf-RsoEUVz_lKE0tcSf5n_oYZYtRI0lTqtd63o,6150
297
+ infrahub/core/schema/node_schema.py,sha256=cc2LQ36Eq_phjK2plfWx4GWLSDznyLcUXMCQmbPIo9Q,5784
297
298
  infrahub/core/schema/profile_schema.py,sha256=cOPSOt5KLgQ0nbqrAN_o33hY_pUtrKmiwSbY_YpVolI,1092
298
299
  infrahub/core/schema/relationship_schema.py,sha256=R-1iC1d70bBW0vWhgJhDB0_J3tRpOqcJmmLzh39NuYs,8501
299
- infrahub/core/schema/schema_branch.py,sha256=Yms2QdNZxqWjtK2sEAgxfRMQmeLEXA16VyqyHErwXgE,106138
300
+ infrahub/core/schema/schema_branch.py,sha256=IGT_pg3KUQcXaKf6fIvrWKzFy4e60u06aDqG0o9001Q,106060
300
301
  infrahub/core/schema/schema_branch_computed.py,sha256=14UUsQJDLMHkYhg7QMqeLiTF3PO8c8rGa90ul3F2ZZo,10629
301
302
  infrahub/core/schema/template_schema.py,sha256=O-PBS9IRM4JX6PxeoyZKwqZ0u0SdQ2zxWMc01PJ2_EA,1084
302
303
  infrahub/core/task/__init__.py,sha256=Ied1NvKGJUDmff27z_-yWW8ArenHxGvSvQTaQyx1iHs,128
@@ -444,7 +445,7 @@ infrahub/graphql/analyzer.py,sha256=TAWo4AWMr33MFjK3YcYBxXSjdwRHxU2HzpIuY9tTHqU,
444
445
  infrahub/graphql/api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
445
446
  infrahub/graphql/api/dependencies.py,sha256=-NMUA_N4tWcVpS6ksCebAyza-JTmHqyYY_QZizgBR1c,1690
446
447
  infrahub/graphql/api/endpoints.py,sha256=wH9eO3CFT-eoSe1Y32BhU9mIf6smEnPeP3tAxZkdt4g,1510
447
- infrahub/graphql/app.py,sha256=zjlsZxkYRqye9yL0c1Y69QcBMr4mwgTu_PdVxyEUlG8,21135
448
+ infrahub/graphql/app.py,sha256=ZoxFravB96Ax3DrQZM0MaK-OsWkFk6Rb-bao7IEE6TI,21177
448
449
  infrahub/graphql/auth/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
449
450
  infrahub/graphql/auth/query_permission_checker/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
450
451
  infrahub/graphql/auth/query_permission_checker/anonymous_checker.py,sha256=ibsmGyOelLJbN2Kfkmffv-5D79h7tRc1Fez5tauFY8w,1377
@@ -455,7 +456,7 @@ infrahub/graphql/auth/query_permission_checker/merge_operation_checker.py,sha256
455
456
  infrahub/graphql/auth/query_permission_checker/object_permission_checker.py,sha256=5Af8bwtG5I-jxPQGOG_-qKV9bQFECn27e_gBoYDxXrs,8408
456
457
  infrahub/graphql/auth/query_permission_checker/super_admin_checker.py,sha256=2RlJ1G-BmJIQW33SletzK1gIQ3nyEB2edTiX0xAjR2E,1550
457
458
  infrahub/graphql/constants.py,sha256=iVvo3HK-ch7YmHw1Eg2E_ja3I45cNAwjpYahsnu85CI,37
458
- infrahub/graphql/context.py,sha256=p-d6LdUBV0kV3yps3MJD09NFAet2Ti_YTO84EKCzV4o,1689
459
+ infrahub/graphql/context.py,sha256=ahp-MvX_0glg9mSPbPVhEwvbYzrIKtaEAGt7CVnAusE,1681
459
460
  infrahub/graphql/directives.py,sha256=wyIkJFp7l0J4JqNl1Lqu7YfKXP7glrewlQFMDTUAPcE,645
460
461
  infrahub/graphql/enums.py,sha256=9F0XWfjQpC__0YRccYG1T-3qL1V8_PmlRlVpU1-n7nQ,820
461
462
  infrahub/graphql/initialization.py,sha256=e97vYE7lQZm7OJxJrhKA6kdxKJ4QOcVbTpoNHq9fweM,4446
@@ -491,7 +492,7 @@ infrahub/graphql/mutations/proposed_change.py,sha256=4y9YTE6f9Rqk_TC2K_uued1bECt
491
492
  infrahub/graphql/mutations/relationship.py,sha256=9LqEPrziruXGBY3Ywquj8yNbA_HB_v2PmGELI6NWBFM,21596
492
493
  infrahub/graphql/mutations/repository.py,sha256=Whrt1uYWt7Ro6omJYN8zc3D-poZ6bOBrpBHIG4odAmo,11316
493
494
  infrahub/graphql/mutations/resource_manager.py,sha256=DvnmfXmS9bNYXjtgedGTKPdJmtdaCbM5qxl0OJ-t1yQ,11342
494
- infrahub/graphql/mutations/schema.py,sha256=vOwP8SIcQxamhP_JwbeXPG5iOEwxHhHawgqU6bD-4us,12897
495
+ infrahub/graphql/mutations/schema.py,sha256=6N70sGn-g5LNyfWwJZ-gonjbCY-Kc1IOukof3Np6pQQ,13568
495
496
  infrahub/graphql/mutations/tasks.py,sha256=IEiT27e6SRJ56OEznWE3r03JfQmyEdxCYBBaVEuHVLU,3898
496
497
  infrahub/graphql/mutations/webhook.py,sha256=IW_WPpBRySd-mpbkuGnR28VpU9naM2bLZBjJOaAGuH4,4777
497
498
  infrahub/graphql/parser.py,sha256=Du1003gL9Bq5niPZE0PT5zB5Sq9Ub2qWJaqf1SnVVck,8603
@@ -501,7 +502,7 @@ infrahub/graphql/queries/account.py,sha256=VB3HtLXf8s7VJxoA4G0ISBvn9hkQ9oTavKfRw
501
502
  infrahub/graphql/queries/branch.py,sha256=hEZF8xJHyXUOQOkWrfjbfrVhIrK70vKMeBGaLLnHQGY,792
502
503
  infrahub/graphql/queries/convert_object_type_mapping.py,sha256=zLav6Eod0OqLgj4PY7q8fCUE-idYYHFQXf_G-siAgyI,1169
503
504
  infrahub/graphql/queries/diff/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
504
- infrahub/graphql/queries/diff/tree.py,sha256=4XcHIMDtJLA6nDBzGNn2WR_HeY_7lrmIU38CdK_qBIc,23026
505
+ infrahub/graphql/queries/diff/tree.py,sha256=DcO8cRd-V3Z66tYYgiNa0CqNW8Z8JwFVJk-0DP3pME4,25278
505
506
  infrahub/graphql/queries/event.py,sha256=9kHi37WmM4bwGgnIPaPLVOaXp124tn10v60kNx5C7aU,4204
506
507
  infrahub/graphql/queries/internal.py,sha256=pcGLpLrY1fC_HxHNs8NAFjr5FTFzcgRlS1F7b65gqfE,647
507
508
  infrahub/graphql/queries/ipam.py,sha256=peN--58IhLgS06O44AEthefEkaVDc7f38Sib3JyGKu4,4106
@@ -514,7 +515,7 @@ infrahub/graphql/resolvers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJ
514
515
  infrahub/graphql/resolvers/many_relationship.py,sha256=5nrb6k6JQYubfGGXnU26XyCACaa1r040uU7N4faDfLQ,9765
515
516
  infrahub/graphql/resolvers/resolver.py,sha256=CCavWqKZ9OfdZRJ57yw3hE320rIubfYF80aQ7Y3lGdo,11934
516
517
  infrahub/graphql/resolvers/single_relationship.py,sha256=Fh8NJYfxWGSIddJsm7PpAacutY_UN_bdMQ3GdvA9W00,6978
517
- infrahub/graphql/schema.py,sha256=oMenIbYKYu7m8j5i36jtrctlsusbqvADtEjzfMCORqg,3943
518
+ infrahub/graphql/schema.py,sha256=BAJIKGNjF26uxqdoouAdCNVbaA3IaAGZtSmb45aNsXM,3896
518
519
  infrahub/graphql/subscription/__init__.py,sha256=rVgLryqg-kbzkd3Dywb1gMPsthR8wFqB7nluuRKKfrE,1154
519
520
  infrahub/graphql/subscription/events.py,sha256=tDg9fy66dLmbXaf_9YC-3LmC1sqsj-smbq_LOsHdZ5Y,1838
520
521
  infrahub/graphql/subscription/graphql_query.py,sha256=U9PwREMghxbuIoGWh3_rV33wKPzDyMILZ8_tuniwukg,2266
@@ -582,7 +583,7 @@ infrahub/patch/runner.py,sha256=ZB4aOqlG77hJNtDyQtIXmi-2WgM07WSEFtWV2NItIqk,1259
582
583
  infrahub/patch/vertex_adder.py,sha256=lhWELYWlHwkopGOECSHRfj1mb0-COheibsu95r2Hwzs,2796
583
584
  infrahub/patch/vertex_deleter.py,sha256=czdb8T30k_-WSbcZUVS2-DvaN3Dp4j9ss2lAz8KN0mo,1302
584
585
  infrahub/patch/vertex_updater.py,sha256=FxQJEnwXdvj2WtwLorRbRAyocWUG9z_RDowflVKqPoU,1136
585
- infrahub/permissions/__init__.py,sha256=WAtFhyaQj8dFkZJGnIbBaVbSMttGZGgK18V-QbMNVNU,538
586
+ infrahub/permissions/__init__.py,sha256=-aOESPa_x7u3BkO3YxABjlzjZ1Eeuec2wN21i1Law_U,660
586
587
  infrahub/permissions/backend.py,sha256=azvyFOTne0Zy1yrc4t9u3GCkHI_x_OPSDV65yxmVPDQ,529
587
588
  infrahub/permissions/constants.py,sha256=2sGj9caif_aH2XtD3s35BU4HRONOjRHCyCJ3gbT5kZs,1221
588
589
  infrahub/permissions/globals.py,sha256=7PT-At0wzy2kdyOhheW05XE0G4EGo76j4BAoB8SRJbg,585
@@ -793,7 +794,7 @@ infrahub_sdk/transfer/importer/json.py,sha256=-Tlmg22TiBrEqXOSLMnUzlCFOZ2M0Q8lWy
793
794
  infrahub_sdk/transfer/schema_sorter.py,sha256=ZoBjJGFT-6jQoKOLaoOPMAWzs7vGOeo7x6zOOP4LNv0,1244
794
795
  infrahub_sdk/transforms.py,sha256=RLiB_CkM-JQSfyifChxxQVl2FrHKOGEf_YynSMKeFZU,2340
795
796
  infrahub_sdk/types.py,sha256=UeZ1rDp4eyH12ApTcUD9a1OOtCp3IL1YZUeeZ06qF-I,1726
796
- infrahub_sdk/utils.py,sha256=dkNqnMEzyPORQ6mw90mH3Qge1fkIclcuQ5kmxND1JAg,11748
797
+ infrahub_sdk/utils.py,sha256=zNoBV8nND10j_OQLmt_Sdz_8_vMgw9qQfpsjdm8O-0s,11881
797
798
  infrahub_sdk/uuidt.py,sha256=Tz-4nHkJwbi39UT3gaIe2wJeZNAoBqf6tm3sw7LZbXc,2155
798
799
  infrahub_sdk/yaml.py,sha256=PRsS7BEM-Xn5wRLAAG-YLTGRBEJy5Dnyim2YskFfe8I,5539
799
800
  infrahub_testcontainers/__init__.py,sha256=oPpmesGgYBSdKTg1L37FGwYBeao1EHury5SJGul-CT8,216
@@ -809,8 +810,8 @@ infrahub_testcontainers/models.py,sha256=ASYyvl7d_WQz_i7y8-3iab9hwwmCl3OCJavqVbe
809
810
  infrahub_testcontainers/performance_test.py,sha256=hvwiy6tc_lWniYqGkqfOXVGAmA_IV15VOZqbiD9ezno,6149
810
811
  infrahub_testcontainers/plugin.py,sha256=I3RuZQ0dARyKHuqCf0y1Yj731P2Mwf3BJUehRJKeWrs,5645
811
812
  infrahub_testcontainers/prometheus.yml,sha256=610xQEyj3xuVJMzPkC4m1fRnCrjGpiRBrXA2ytCLa54,599
812
- infrahub_server-1.3.6.dist-info/LICENSE.txt,sha256=7GQO7kxVoQYnZtFrjZBKLRXbrGwwwimHPPOJtqXsozQ,11340
813
- infrahub_server-1.3.6.dist-info/METADATA,sha256=9at1iBSv1D91S_nPEQJJ29jVjUyibVPCxUGzze34eAs,8189
814
- infrahub_server-1.3.6.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
815
- infrahub_server-1.3.6.dist-info/entry_points.txt,sha256=UXIeFWDsrV-4IllNvUEd6KieYGzQfn9paga2YyABOQI,393
816
- infrahub_server-1.3.6.dist-info/RECORD,,
813
+ infrahub_server-1.3.8.dist-info/LICENSE.txt,sha256=7GQO7kxVoQYnZtFrjZBKLRXbrGwwwimHPPOJtqXsozQ,11340
814
+ infrahub_server-1.3.8.dist-info/METADATA,sha256=AM93SvBG4Wl4D53wWQ9ktQPK0ZqIa87Osu0e0EWFOBg,8189
815
+ infrahub_server-1.3.8.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
816
+ infrahub_server-1.3.8.dist-info/entry_points.txt,sha256=UXIeFWDsrV-4IllNvUEd6KieYGzQfn9paga2YyABOQI,393
817
+ infrahub_server-1.3.8.dist-info/RECORD,,