infrahub-server 1.3.5__py3-none-any.whl → 1.3.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- infrahub/api/schema.py +3 -7
- infrahub/cli/db.py +25 -0
- infrahub/cli/db_commands/__init__.py +0 -0
- infrahub/cli/db_commands/check_inheritance.py +284 -0
- infrahub/core/graph/__init__.py +1 -1
- infrahub/core/manager.py +2 -2
- infrahub/core/migrations/graph/__init__.py +2 -0
- infrahub/core/migrations/graph/m033_deduplicate_relationship_vertices.py +1 -1
- infrahub/core/migrations/graph/m035_orphan_relationships.py +43 -0
- infrahub/core/migrations/query/node_duplicate.py +26 -3
- infrahub/core/node/resource_manager/ip_address_pool.py +50 -48
- infrahub/core/node/resource_manager/ip_prefix_pool.py +55 -53
- infrahub/core/node/resource_manager/number_pool.py +20 -18
- infrahub/core/query/branch.py +37 -20
- infrahub/core/query/node.py +15 -0
- infrahub/core/relationship/model.py +13 -13
- infrahub/graphql/context.py +1 -1
- infrahub/graphql/mutations/relationship.py +1 -1
- infrahub/graphql/mutations/schema.py +14 -1
- infrahub/graphql/schema.py +3 -14
- infrahub/permissions/__init__.py +2 -0
- infrahub_sdk/utils.py +9 -5
- {infrahub_server-1.3.5.dist-info → infrahub_server-1.3.7.dist-info}/METADATA +1 -1
- {infrahub_server-1.3.5.dist-info → infrahub_server-1.3.7.dist-info}/RECORD +27 -24
- {infrahub_server-1.3.5.dist-info → infrahub_server-1.3.7.dist-info}/LICENSE.txt +0 -0
- {infrahub_server-1.3.5.dist-info → infrahub_server-1.3.7.dist-info}/WHEEL +0 -0
- {infrahub_server-1.3.5.dist-info → infrahub_server-1.3.7.dist-info}/entry_points.txt +0 -0
infrahub/core/node/resource_manager/ip_prefix_pool.py
CHANGED

@@ -5,6 +5,7 @@ from typing import TYPE_CHECKING, Any
 
 from netaddr import IPSet
 
+from infrahub import lock
 from infrahub.core import registry
 from infrahub.core.ipam.reconciler import IpamReconciler
 from infrahub.core.query.ipam import get_subnets
@@ -36,59 +37,60 @@ class CoreIPPrefixPool(Node):
         prefix_type: str | None = None,
         at: Timestamp | None = None,
     ) -> Node:
-        [… 53 removed lines not captured in this diff view …]
+        async with lock.registry.get(name=self.get_id(), namespace="resource_pool"):
+            # Check if there is already a resource allocated with this identifier
+            # if not, pull all existing prefixes and allocated the next available
+            if identifier:
+                query_get = await PrefixPoolGetReserved.init(db=db, pool_id=self.id, identifier=identifier)
+                await query_get.execute(db=db)
+                result = query_get.get_result()
+                if result:
+                    prefix = result.get_node("prefix")
+                    # TODO add support for branch, if the node is reserved with this id in another branch we should return an error
+                    node = await registry.manager.get_one(db=db, id=prefix.get("uuid"), branch=branch)
+                    if node:
+                        return node
+
+            ip_namespace = await self.ip_namespace.get_peer(db=db)  # type: ignore[attr-defined]
+
+            data = data or {}
+
+            prefixlen = prefixlen or data.get("prefixlen", None) or self.default_prefix_length.value  # type: ignore[attr-defined]
+            if not prefixlen:
+                raise ValueError(
+                    f"IPPrefixPool: {self.name.value} | "  # type: ignore[attr-defined]
+                    "A prefixlen or a default_value must be provided to allocate a new prefix"
+                )
+
+            next_prefix = await self.get_next(db=db, prefixlen=prefixlen)
+
+            prefix_type = prefix_type or data.get("prefix_type", None) or self.default_prefix_type.value  # type: ignore[attr-defined]
+            if not prefix_type:
+                raise ValueError(
+                    f"IPPrefixPool: {self.name.value} | "  # type: ignore[attr-defined]
+                    "A prefix_type or a default_value type must be provided to allocate a new prefix"
+                )
+
+            member_type = member_type or data.get("member_type", None) or self.default_member_type.value.value  # type: ignore[attr-defined]
+            data["member_type"] = member_type
+
+            target_schema = registry.get_node_schema(name=prefix_type, branch=branch)
+            node = await Node.init(db=db, schema=target_schema, branch=branch, at=at)
+            try:
+                await node.new(db=db, prefix=str(next_prefix), ip_namespace=ip_namespace, **data)
+            except ValidationError as exc:
+                raise ValueError(f"IPPrefixPool: {self.name.value} | {exc!s}") from exc  # type: ignore[attr-defined]
+            await node.save(db=db, at=at)
+            reconciler = IpamReconciler(db=db, branch=branch)
+            await reconciler.reconcile(ip_value=next_prefix, namespace=ip_namespace.id, node_uuid=node.get_id())
+
+            if identifier:
+                query_set = await PrefixPoolSetReserved.init(
+                    db=db, pool_id=self.id, identifier=identifier, prefix_id=node.id, at=at
+                )
+                await query_set.execute(db=db)
+
+            return node
 
     async def get_next(self, db: InfrahubDatabase, prefixlen: int) -> IPNetworkType:
         resources = await self.resources.get_peers(db=db)  # type: ignore[attr-defined]
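
Note: the substantive change in this file is that the whole allocation path now runs inside a named lock (lock.registry.get(name=..., namespace="resource_pool")), serializing concurrent allocations against the same pool. A minimal sketch of that pattern follows; all names here (InMemoryLockRegistry, allocate) are illustrative, not Infrahub's actual lock API.

import asyncio
from collections import defaultdict


class InMemoryLockRegistry:
    """Hands out one asyncio.Lock per (namespace, name) pair."""

    def __init__(self) -> None:
        self._locks: dict[tuple[str, str], asyncio.Lock] = defaultdict(asyncio.Lock)

    def get(self, name: str, namespace: str) -> asyncio.Lock:
        return self._locks[(namespace, name)]


lock_registry = InMemoryLockRegistry()


async def allocate(pool_id: str, identifier: str | None, reserved: dict[str, str], free: list[str]) -> str:
    # Serialize per pool: without the lock, two concurrent requests could both
    # compute the same "next available" prefix and double-allocate it.
    async with lock_registry.get(name=pool_id, namespace="resource_pool"):
        if identifier and identifier in reserved:
            return reserved[identifier]  # idempotent: reuse the earlier reservation
        resource = free.pop(0)  # stand-in for get_next()
        if identifier:
            reserved[identifier] = resource
        return resource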
infrahub/core/node/resource_manager/number_pool.py
CHANGED

@@ -2,6 +2,7 @@ from __future__ import annotations
 
 from typing import TYPE_CHECKING
 
+from infrahub import lock
 from infrahub.core import registry
 from infrahub.core.query.resource_manager import NumberPoolGetReserved, NumberPoolGetUsed, NumberPoolSetReserved
 from infrahub.core.schema.attribute_parameters import NumberAttributeParameters
@@ -62,24 +63,25 @@ class CoreNumberPool(Node):
         identifier: str | None = None,
         at: Timestamp | None = None,
     ) -> int:
-        [… 18 removed lines not captured in this diff view …]
+        async with lock.registry.get(name=self.get_id(), namespace="resource_pool"):
+            # NOTE: ideally we should use the HFID as the identifier (if available)
+            # one of the challenge with using the HFID is that it might change over time
+            # so we need to ensure that the identifier is stable, or we need to handle the case where the identifier changes
+            identifier = identifier or node.get_id()
+
+            # Check if there is already a resource allocated with this identifier
+            # if not, pull all existing number and allocate the next available
+            # TODO add support for branch, if the node is reserved with this id in another branch we should return an error
+            query_get = await NumberPoolGetReserved.init(db=db, branch=branch, pool_id=self.id, identifier=identifier)
+            await query_get.execute(db=db)
+            reservation = query_get.get_reservation()
+            if reservation is not None:
+                return reservation
+
+            # If we have not returned a value we need to find one if avaiable
+            number = await self.get_next(db=db, branch=branch, attribute=attribute)
+            await self.reserve(db=db, number=number, identifier=identifier, at=at)
+            return number
 
     async def get_next(self, db: InfrahubDatabase, branch: Branch, attribute: AttributeSchema) -> int:
         taken = await self.get_used(db=db, branch=branch)
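
The same lock now also guards number allocation, and get_next reduces to "lowest value in the pool's range not already used". An illustrative helper (the real range comes from the pool's NumberAttributeParameters, and the real lookup is a database query):

def next_free_number(start: int, end: int, taken: set[int]) -> int:
    """Return the lowest number in [start, end] not present in `taken`."""
    for candidate in range(start, end + 1):
        if candidate not in taken:
            return candidate
    raise ValueError("number pool exhausted")


assert next_free_number(1, 10, taken={1, 2, 4}) == 3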
infrahub/core/query/branch.py
CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
 
 from typing import TYPE_CHECKING, Any
 
 from infrahub import config
+from infrahub.core.constants import GLOBAL_BRANCH_NAME
 from infrahub.core.query import Query, QueryType
 
 if TYPE_CHECKING:
@@ -21,33 +22,49 @@ class DeleteBranchRelationshipsQuery(Query):
 
     async def query_init(self, db: InfrahubDatabase, **kwargs: Any) -> None:  # noqa: ARG002
         query = """
-        //
-        [… 4 removed lines not captured in this diff view …]
+        // --------------
+        // for every Node created on this branch (it's about to be deleted), find any agnostic relationships
+        // connected to the Node and delete them
+        // --------------
+        OPTIONAL MATCH (:Root)<-[e:IS_PART_OF {status: "active"}]-(n:Node)
+        WHERE e.branch = $branch_name
+        CALL (n) {
+            OPTIONAL MATCH (n)-[:IS_RELATED {branch: $global_branch_name}]-(rel:Relationship)
+            DETACH DELETE rel
         } IN TRANSACTIONS
 
-        //
-        WITH …
-        [… 5 removed lines not captured in this diff view …]
+        // reduce the results to a single row
+        WITH 1 AS one
+        LIMIT 1
+
+        // --------------
+        // for every edge on this branch, delete it
+        // --------------
+        MATCH (s)-[r]->(d)
+        WHERE r.branch = $branch_name
+        CALL (r) {
+            DELETE r
         } IN TRANSACTIONS
 
-        //
-        [… 7 removed lines not captured in this diff view …]
+        // --------------
+        // get the database IDs of every vertex linked to a deleted edge
+        // --------------
+        WITH DISTINCT elementId(s) AS s_id, elementId(d) AS d_id
+        WITH collect(s_id) + collect(d_id) AS vertex_ids
+        UNWIND vertex_ids AS vertex_id
+
+        // --------------
+        // delete any vertices that are now orphaned
+        // --------------
+        CALL (vertex_id) {
+            MATCH (n)
+            WHERE elementId(n) = vertex_id
+            AND NOT exists((n)--())
+            DELETE n
        } IN TRANSACTIONS
        """
        self.params["branch_name"] = self.branch_name
+       self.params["global_branch_name"] = GLOBAL_BRANCH_NAME
        self.add_to_query(query)
 
 
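
The rewritten query works in three passes: delete agnostic relationship vertices attached to nodes created on the branch, delete every edge whose branch property matches, then delete any endpoint vertex left with no edges at all. An in-memory analogue of the final orphan-cleanup step (illustrative shapes, not Infrahub code):

def delete_branch_edges(edges: list[tuple[str, str, str]], branch: str) -> tuple[list[tuple[str, str, str]], set[str]]:
    """Drop (source, branch, dest) edges on `branch` and report orphaned vertices."""
    kept = [(s, b, d) for (s, b, d) in edges if b != branch]
    touched = {v for (s, b, d) in edges if b == branch for v in (s, d)}
    still_connected = {v for (s, _, d) in kept for v in (s, d)}
    orphans = touched - still_connected  # the vertices the Cypher deletes
    return kept, orphans


edges = [("n1", "main", "n2"), ("n2", "branch-x", "n3")]
assert delete_branch_edges(edges, "branch-x") == ([("n1", "main", "n2")], {"n3"})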
infrahub/core/query/node.py
CHANGED
@@ -157,7 +157,22 @@ class NodeCreateAllQuery(NodeQuery):
         relationships: list[RelationshipCreateData] = []
         for rel_name in self.node._relationships:
             rel_manager: RelationshipManager = getattr(self.node, rel_name)
+            if rel_manager.schema.cardinality == "many":
+                # Fetch all relationship peers through a single database call for performances.
+                peers = await rel_manager.get_peers(db=db, branch_agnostic=self.branch_agnostic)
+
             for rel in rel_manager._relationships:
+                if rel_manager.schema.cardinality == "many":
+                    try:
+                        rel.set_peer(value=peers[rel.get_peer_id()])
+                    except KeyError:
+                        pass
+                    except ValueError:
+                        # Relationship has not been initialized yet, it means the peer does not exist in db yet
+                        # typically because it will be allocated from a ressource pool. In that case, the peer
+                        # will be fetched using `rel.resolve` later.
+                        pass
+
                 rel_create_data = await rel.get_create_data(db=db, at=at)
                 if rel_create_data.peer_branch_level > deepest_branch_level or (
                     deepest_branch_name == GLOBAL_BRANCH_NAME and rel_create_data.peer_branch == registry.default_branch
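
For cardinality-many relationships this replaces one database lookup per peer with a single batched query plus local dictionary lookups, the usual fix for an N+1 access pattern. A generic sketch of the shape (assumed function names, not the Infrahub API):

from typing import Any, Awaitable, Callable


async def attach_all(ids: list[str], fetch_many: Callable[[list[str]], Awaitable[dict[str, Any]]]) -> dict[str, Any]:
    # One round-trip for the whole batch instead of len(ids) round-trips.
    peers = await fetch_many(ids)
    # Ids missing from the result are skipped; they resolve later
    # (e.g. peers that only exist after pool allocation).
    return {i: peers[i] for i in ids if i in peers}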
infrahub/core/relationship/model.py
CHANGED

@@ -166,11 +166,11 @@ class Relationship(FlagPropertyMixin, NodePropertyMixin):
             return registry.get_global_branch()
         return self.branch
 
-    [… removed line not captured in this diff view …]
+    def _process_data(self, data: dict | RelationshipPeerData | str) -> None:
         self.data = data
 
         if isinstance(data, RelationshipPeerData):
-            [… removed line not captured in this diff view …]
+            self.set_peer(value=str(data.peer_id))
 
             if not self.id and data.rel_node_id:
                 self.id = data.rel_node_id
@@ -187,7 +187,7 @@ class Relationship(FlagPropertyMixin, NodePropertyMixin):
         elif isinstance(data, dict):
             for key, value in data.items():
                 if key in ["peer", "id"]:
-                    [… removed line not captured in this diff view …]
+                    self.set_peer(value=data.get(key, None))
                 elif key == "hfid" and self.peer_id is None:
                     self.peer_hfid = value
                 elif key.startswith(PREFIX_PROPERTY) and key.replace(PREFIX_PROPERTY, "") in self._flag_properties:
@@ -198,7 +198,7 @@ class Relationship(FlagPropertyMixin, NodePropertyMixin):
                     self.from_pool = value
 
         else:
-            [… removed line not captured in this diff view …]
+            self.set_peer(value=data)
 
     async def new(
         self,
@@ -206,11 +206,11 @@ class Relationship(FlagPropertyMixin, NodePropertyMixin):
         data: dict | RelationshipPeerData | Any = None,
         **kwargs: Any,  # noqa: ARG002
     ) -> Relationship:
-        [… removed line not captured in this diff view …]
+        self._process_data(data=data)
 
         return self
 
-    [… removed line not captured in this diff view …]
+    def load(
         self,
         db: InfrahubDatabase,  # noqa: ARG002
         id: UUID | None = None,
@@ -223,7 +223,7 @@ class Relationship(FlagPropertyMixin, NodePropertyMixin):
         self.id = id or self.id
         self.db_id = db_id or self.db_id
 
-        [… removed line not captured in this diff view …]
+        self._process_data(data=data)
 
         if updated_at and hash(self) != hash_before:
             self.updated_at = Timestamp(updated_at)
@@ -252,7 +252,7 @@ class Relationship(FlagPropertyMixin, NodePropertyMixin):
             self._node_id = self._node.id
         return node
 
-    [… removed line not captured in this diff view …]
+    def set_peer(self, value: str | Node) -> None:
         if isinstance(value, str):
             self.peer_id = value
         else:
@@ -433,7 +433,7 @@
                 db=db, id=self.peer_id, branch=self.branch, kind=self.schema.peer, fields={"display_label": None}
             )
             if peer:
-                [… removed line not captured in this diff view …]
+                self.set_peer(value=peer)
 
         if not self.peer_id and self.peer_hfid:
             peer_schema = db.schema.get(name=self.schema.peer, branch=self.branch)
@@ -450,7 +450,7 @@
                 fields={"display_label": None},
                 raise_on_error=True,
             )
-            [… removed line not captured in this diff view …]
+            self.set_peer(value=peer)
 
         if not self.peer_id and self.from_pool and "id" in self.from_pool:
             pool_id = str(self.from_pool.get("id"))
@@ -473,7 +473,7 @@
                 data_from_pool["identifier"] = f"hfid={hfid_str} rel={self.name}"
 
             assigned_peer: Node = await pool.get_resource(db=db, branch=self.branch, at=at, **data_from_pool)  # type: ignore[attr-defined]
-            [… removed line not captured in this diff view …]
+            self.set_peer(value=assigned_peer)
             self.set_source(value=pool.id)
 
     async def save(self, db: InfrahubDatabase, at: Timestamp | None = None) -> Self:
@@ -962,7 +962,7 @@ class RelationshipManager:
 
         for peer_id in details.peer_ids_present_database_only:
             self._relationships.append(
-                [… removed line not captured in this diff view …]
+                Relationship(
                     schema=self.schema,
                     branch=self.branch,
                     at=at or self.at,
@@ -1050,7 +1050,7 @@
             if isinstance(item, dict) and item.get("id", None) in previous_relationships:
                 rel = previous_relationships[item["id"]]
                 hash_before = hash(rel)
-                [… removed line not captured in this diff view …]
+                rel.load(data=item, db=db)
                 if hash(rel) != hash_before:
                     changed = True
                 self._relationships.append(rel)
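
The common theme across these hunks: peer assignment now flows through a single set_peer entry point that accepts either an id string or a full Node, and new/load share one _process_data path. A reduced sketch of the dispatch; the else branch body is an assumption, since the diff only shows the str case:

class RelationshipSketch:
    """Reduced model of the centralized peer assignment (illustrative only)."""

    def __init__(self) -> None:
        self.peer_id: str | None = None
        self._peer: object | None = None

    def set_peer(self, value: object) -> None:
        if isinstance(value, str):
            self.peer_id = value
        else:
            # assumed: a full node is cached and its id recorded as well
            self._peer = value
            self.peer_id = getattr(value, "id", None)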
infrahub/graphql/context.py
CHANGED
@@ -5,7 +5,7 @@ from typing import TYPE_CHECKING
 
 from infrahub.core.constants import GlobalPermissions, InfrahubKind
 from infrahub.core.manager import NodeManager
 from infrahub.exceptions import NodeNotFoundError, ValidationError
-from infrahub.permissions …
+from infrahub.permissions import define_global_permission_from_branch
 
 if TYPE_CHECKING:
     from .initialization import GraphqlContext
infrahub/graphql/mutations/relationship.py
CHANGED

@@ -233,7 +233,7 @@ class RelationshipRemove(Mutation):
             # we should use RelationshipDataDeleteQuery to delete the relationship
             # it would be more query efficient
             rel = Relationship(schema=rel_schema, branch=graphql_context.branch, node=source)
-            [… removed line not captured in this diff view …]
+            rel.load(db=db, data=existing_peers[node_data.get("id")])
             if group_event_type != GroupUpdateType.NONE:
                 peers.append(EventNode(id=rel.get_peer_id(), kind=nodes[rel.get_peer_id()].get_kind()))
             node_changelog.delete_relationship(relationship=rel)
infrahub/graphql/mutations/schema.py
CHANGED

@@ -6,7 +6,7 @@ from graphene import Boolean, Field, InputObjectType, Mutation, String
 
 from infrahub import lock
 from infrahub.core import registry
-from infrahub.core.constants import RESTRICTED_NAMESPACES
+from infrahub.core.constants import RESTRICTED_NAMESPACES, GlobalPermissions
 from infrahub.core.manager import NodeManager
 from infrahub.core.schema import DropdownChoice, GenericSchema, NodeSchema
 from infrahub.database import InfrahubDatabase, retry_db_transaction
@@ -16,6 +16,7 @@ from infrahub.exceptions import ValidationError
 from infrahub.graphql.context import apply_external_context
 from infrahub.graphql.types.context import ContextInput
 from infrahub.log import get_log_data, get_logger
+from infrahub.permissions import define_global_permission_from_branch
 from infrahub.worker import WORKER_IDENTITY
 
 from ..types import DropdownFields
@@ -32,6 +33,14 @@ if TYPE_CHECKING:
 log = get_logger()
 
 
+def _validate_schema_permission(graphql_context: GraphqlContext) -> None:
+    graphql_context.active_permissions.raise_for_permission(
+        permission=define_global_permission_from_branch(
+            permission=GlobalPermissions.MANAGE_SCHEMA, branch_name=graphql_context.branch.name
+        )
+    )
+
+
 class SchemaEnumInput(InputObjectType):
     kind = String(required=True)
     attribute = String(required=True)
@@ -69,6 +78,7 @@ class SchemaDropdownAdd(Mutation):
     ) -> Self:
         graphql_context: GraphqlContext = info.context
 
+        _validate_schema_permission(graphql_context=graphql_context)
         await apply_external_context(graphql_context=graphql_context, context_input=context)
 
         kind = graphql_context.db.schema.get(name=str(data.kind), branch=graphql_context.branch.name)
@@ -130,6 +140,7 @@ class SchemaDropdownRemove(Mutation):
     ) -> dict[str, bool]:
         graphql_context: GraphqlContext = info.context
 
+        _validate_schema_permission(graphql_context=graphql_context)
         kind = graphql_context.db.schema.get(name=str(data.kind), branch=graphql_context.branch.name)
         await apply_external_context(graphql_context=graphql_context, context_input=context)
 
@@ -185,6 +196,7 @@ class SchemaEnumAdd(Mutation):
     ) -> dict[str, bool]:
         graphql_context: GraphqlContext = info.context
 
+        _validate_schema_permission(graphql_context=graphql_context)
         kind = graphql_context.db.schema.get(name=str(data.kind), branch=graphql_context.branch.name)
         await apply_external_context(graphql_context=graphql_context, context_input=context)
 
@@ -230,6 +242,7 @@ class SchemaEnumRemove(Mutation):
     ) -> dict[str, bool]:
         graphql_context: GraphqlContext = info.context
 
+        _validate_schema_permission(graphql_context=graphql_context)
         kind = graphql_context.db.schema.get(name=str(data.kind), branch=graphql_context.branch.name)
         await apply_external_context(graphql_context=graphql_context, context_input=context)
 
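
Net effect: the four schema-editing mutations (dropdown add/remove, enum add/remove) now require the MANAGE_SCHEMA global permission on the target branch before doing any work. A self-contained sketch of the guard pattern; the context type, the permission-string encoding, and the use of PermissionError are all assumptions (Infrahub uses its own context and error types):

class FakeGraphqlContext:
    def __init__(self, granted: set[str], branch_name: str) -> None:
        self.granted = granted
        self.branch_name = branch_name


def validate_schema_permission(ctx: FakeGraphqlContext) -> None:
    required = f"global:manage_schema:{ctx.branch_name}"  # assumed encoding
    if required not in ctx.granted:
        raise PermissionError(f"missing permission: {required}")


validate_schema_permission(FakeGraphqlContext({"global:manage_schema:main"}, "main"))  # passes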
infrahub/graphql/schema.py
CHANGED
@@ -21,21 +21,10 @@ from .mutations.diff import DiffUpdateMutation
 from .mutations.diff_conflict import ResolveDiffConflict
 from .mutations.generator import GeneratorDefinitionRequestRun
 from .mutations.proposed_change import ProposedChangeMerge, ProposedChangeRequestRunCheck
-from .mutations.relationship import (
-    RelationshipAdd,
-    RelationshipRemove,
-)
-from .mutations.repository import (
-    ProcessRepository,
-    ValidateRepositoryConnectivity,
-)
+from .mutations.relationship import RelationshipAdd, RelationshipRemove
+from .mutations.repository import ProcessRepository, ValidateRepositoryConnectivity
 from .mutations.resource_manager import IPAddressPoolGetResource, IPPrefixPoolGetResource
-from .mutations.schema import (
-    SchemaDropdownAdd,
-    SchemaDropdownRemove,
-    SchemaEnumAdd,
-    SchemaEnumRemove,
-)
+from .mutations.schema import SchemaDropdownAdd, SchemaDropdownRemove, SchemaEnumAdd, SchemaEnumRemove
 from .queries import (
     AccountPermissions,
     AccountToken,
infrahub/permissions/__init__.py
CHANGED
@@ -1,4 +1,5 @@
 from infrahub.permissions.backend import PermissionBackend
+from infrahub.permissions.globals import define_global_permission_from_branch
 from infrahub.permissions.local_backend import LocalPermissionBackend
 from infrahub.permissions.manager import PermissionManager
 from infrahub.permissions.report import report_schema_permissions
@@ -9,6 +10,7 @@ __all__ = [
     "LocalPermissionBackend",
     "PermissionBackend",
     "PermissionManager",
+    "define_global_permission_from_branch",
     "get_global_permission_for_kind",
     "report_schema_permissions",
 ]
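
With this re-export in place, callers such as infrahub/graphql/context.py and infrahub/graphql/mutations/schema.py (above) can import the helper from the package root:

from infrahub.permissions import define_global_permission_from_branch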
infrahub_sdk/utils.py
CHANGED
@@ -142,14 +142,18 @@ def deep_merge_dict(dicta: dict, dictb: dict, path: list | None = None) -> dict:
     if path is None:
         path = []
     for key in dictb:
+        b_val = dictb[key]
         if key in dicta:
-            [… 3 removed lines not captured in this diff view …]
+            a_val = dicta[key]
+            if isinstance(a_val, dict) and isinstance(b_val, dict):
+                deep_merge_dict(a_val, b_val, path + [str(key)])
+            elif isinstance(a_val, list) and isinstance(b_val, list):
                 # Merge lists
                 # Cannot use compare_list because list of dicts won't work (dict not hashable)
-                dicta[key] = [i for i in …
-                elif …
+                dicta[key] = [i for i in a_val if i not in b_val] + b_val
+            elif a_val is None and b_val is not None:
+                dicta[key] = b_val
+            elif a_val == b_val or (a_val is not None and b_val is None):
                 continue
             else:
                 raise ValueError("Conflict at %s" % ".".join(path + [str(key)]))
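
Based on the merged code above, the semantics are: nested dicts merge recursively, lists are unioned with dictb's items appended, a None in dicta is overwritten, equal values (or a None in dictb) are kept, and any other conflict raises. A worked example, assuming the usual fallthrough for keys missing from dicta (which sits below this hunk):

a = {"x": {"k": 1}, "tags": ["a", "b"], "y": None}
b = {"x": {"j": 2}, "tags": ["b", "c"], "y": 5}
deep_merge_dict(a, b)
assert a == {"x": {"k": 1, "j": 2}, "tags": ["a", "b", "c"], "y": 5}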
{infrahub_server-1.3.5.dist-info → infrahub_server-1.3.7.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: infrahub-server
-Version: 1.3.5
+Version: 1.3.7
 Summary: Infrahub is taking a new approach to Infrastructure Management by providing a new generation of datastore to organize and control all the data that defines how an infrastructure should run.
 License: Apache-2.0
 Author: OpsMill