infrahub-server 1.3.1__py3-none-any.whl → 1.3.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- infrahub/cli/db.py +194 -13
- infrahub/core/branch/enums.py +8 -0
- infrahub/core/branch/models.py +28 -5
- infrahub/core/branch/tasks.py +5 -7
- infrahub/core/diff/calculator.py +4 -1
- infrahub/core/diff/coordinator.py +32 -34
- infrahub/core/diff/diff_locker.py +26 -0
- infrahub/core/diff/query_parser.py +23 -32
- infrahub/core/graph/__init__.py +1 -1
- infrahub/core/initialization.py +4 -3
- infrahub/core/merge.py +31 -16
- infrahub/core/migrations/graph/__init__.py +24 -0
- infrahub/core/migrations/graph/m012_convert_account_generic.py +4 -3
- infrahub/core/migrations/graph/m013_convert_git_password_credential.py +4 -3
- infrahub/core/migrations/graph/m032_cleanup_orphaned_branch_relationships.py +105 -0
- infrahub/core/migrations/graph/m033_deduplicate_relationship_vertices.py +97 -0
- infrahub/core/node/__init__.py +3 -0
- infrahub/core/node/constraints/grouped_uniqueness.py +88 -132
- infrahub/core/node/resource_manager/ip_address_pool.py +5 -3
- infrahub/core/node/resource_manager/ip_prefix_pool.py +7 -4
- infrahub/core/node/resource_manager/number_pool.py +3 -1
- infrahub/core/node/standard.py +4 -0
- infrahub/core/query/branch.py +25 -56
- infrahub/core/query/node.py +78 -24
- infrahub/core/query/relationship.py +11 -8
- infrahub/core/relationship/model.py +10 -5
- infrahub/core/validators/uniqueness/model.py +17 -0
- infrahub/core/validators/uniqueness/query.py +212 -1
- infrahub/dependencies/builder/diff/coordinator.py +3 -0
- infrahub/dependencies/builder/diff/locker.py +8 -0
- infrahub/graphql/mutations/main.py +25 -4
- infrahub/graphql/mutations/tasks.py +2 -0
- infrahub_sdk/node/node.py +22 -10
- infrahub_sdk/node/related_node.py +7 -0
- {infrahub_server-1.3.1.dist-info → infrahub_server-1.3.3.dist-info}/METADATA +1 -1
- {infrahub_server-1.3.1.dist-info → infrahub_server-1.3.3.dist-info}/RECORD +42 -37
- infrahub_testcontainers/container.py +1 -1
- infrahub_testcontainers/docker-compose-cluster.test.yml +3 -0
- infrahub_testcontainers/docker-compose.test.yml +1 -0
- {infrahub_server-1.3.1.dist-info → infrahub_server-1.3.3.dist-info}/LICENSE.txt +0 -0
- {infrahub_server-1.3.1.dist-info → infrahub_server-1.3.3.dist-info}/WHEEL +0 -0
- {infrahub_server-1.3.1.dist-info → infrahub_server-1.3.3.dist-info}/entry_points.txt +0 -0
infrahub/core/node/constraints/grouped_uniqueness.py
CHANGED

```diff
@@ -1,26 +1,19 @@
 from __future__ import annotations
 
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING
 
 from infrahub.core import registry
 from infrahub.core.constants import NULL_VALUE
-from infrahub.core.schema import (
-    MainSchemaTypes,
-    SchemaAttributePath,
-    SchemaAttributePathValue,
-)
 from infrahub.core.schema.basenode_schema import (
-    SchemaUniquenessConstraintPath,
     UniquenessConstraintType,
     UniquenessConstraintViolation,
 )
-from infrahub.core.validators.uniqueness.index import UniquenessQueryResultsIndex
 from infrahub.core.validators.uniqueness.model import (
-    NodeUniquenessQueryRequest,
-    QueryAttributePath,
-    QueryRelationshipAttributePath,
+    NodeUniquenessQueryRequestValued,
+    QueryAttributePathValued,
+    QueryRelationshipPathValued,
 )
-from infrahub.core.validators.uniqueness.query import NodeUniqueAttributeConstraintQuery
+from infrahub.core.validators.uniqueness.query import UniquenessValidationQuery
 from infrahub.exceptions import HFIDViolatedError, ValidationError
 
 from .interface import NodeConstraintInterface
@@ -28,8 +21,11 @@ from .interface import NodeConstraintInterface
 if TYPE_CHECKING:
     from infrahub.core.branch import Branch
     from infrahub.core.node import Node
-    from infrahub.core.query import QueryResult
     from infrahub.core.relationship.model import RelationshipManager
+    from infrahub.core.schema import (
+        MainSchemaTypes,
+        SchemaAttributePath,
+    )
     from infrahub.core.timestamp import Timestamp
     from infrahub.database import InfrahubDatabase
 
@@ -40,72 +36,38 @@ class NodeGroupedUniquenessConstraint(NodeConstraintInterface):
         self.branch = branch
         self.schema_branch = registry.schema.get_schema_branch(branch.name)
 
-    async def _build_query_request(
-        self,
-        updated_node: Node,
-        node_schema: MainSchemaTypes,
-        uniqueness_constraint_paths: list[SchemaUniquenessConstraintPath],
-        filters: list[str] | None = None,
-    ) -> NodeUniquenessQueryRequest:
-        query_request = NodeUniquenessQueryRequest(kind=node_schema.kind)
-        for uniqueness_constraint_path in uniqueness_constraint_paths:
-            include_in_query = not filters
-            query_relationship_paths: set[QueryRelationshipAttributePath] = set()
-            query_attribute_paths: set[QueryAttributePath] = set()
-            for attribute_path in uniqueness_constraint_path.attributes_paths:
-                if attribute_path.related_schema and attribute_path.relationship_schema:
-                    if filters and attribute_path.relationship_schema.name in filters:
-                        include_in_query = True
-
-                    relationship_manager: RelationshipManager = getattr(
-                        updated_node, attribute_path.relationship_schema.name
-                    )
-                    related_node = await relationship_manager.get_peer(db=self.db)
-                    related_node_id = related_node.get_id() if related_node else None
-                    query_relationship_paths.add(
-                        QueryRelationshipAttributePath(
-                            identifier=attribute_path.relationship_schema.get_identifier(),
-                            value=related_node_id,
-                        )
-                    )
-                    continue
-                if attribute_path.attribute_schema:
-                    if filters and attribute_path.attribute_schema.name in filters:
-                        include_in_query = True
-                    attribute_name = attribute_path.attribute_schema.name
-                    attribute = getattr(updated_node, attribute_name)
-                    if attribute.is_enum and attribute.value:
-                        attribute_value = attribute.value.value
-                    else:
-                        attribute_value = attribute.value
-                    if attribute_value is None:
-                        attribute_value = NULL_VALUE
-                    query_attribute_paths.add(
-                        QueryAttributePath(
-                            attribute_name=attribute_name,
-                            property_name=attribute_path.attribute_property_name or "value",
-                            value=attribute_value,
-                        )
-                    )
-            if include_in_query:
-                query_request.relationship_attribute_paths |= query_relationship_paths
-                query_request.unique_attribute_paths |= query_attribute_paths
-        return query_request
-
-    async def _get_node_attribute_path_values(
+    async def _get_unique_valued_paths(
         self,
         updated_node: Node,
         path_group: list[SchemaAttributePath],
-
-
+        filters: list[str],
+    ) -> list[QueryAttributePathValued | QueryRelationshipPathValued]:
+        # if filters are provided, we need to check if the path group is relevant to the filters
+        if filters:
+            field_names: list[str] = []
+            for schema_attribute_path in path_group:
+                if schema_attribute_path.relationship_schema:
+                    field_names.append(schema_attribute_path.relationship_schema.name)
+                elif schema_attribute_path.attribute_schema:
+                    field_names.append(schema_attribute_path.attribute_schema.name)
+
+            if not set(field_names) & set(filters):
+                return []
+
+        valued_paths: list[QueryAttributePathValued | QueryRelationshipPathValued] = []
         for schema_attribute_path in path_group:
             if schema_attribute_path.relationship_schema:
                 relationship_name = schema_attribute_path.relationship_schema.name
                 relationship_manager: RelationshipManager = getattr(updated_node, relationship_name)
                 related_node = await relationship_manager.get_peer(db=self.db)
                 related_node_id = related_node.get_id() if related_node else None
-
-
+                valued_paths.append(
+                    QueryRelationshipPathValued(
+                        relationship_schema=schema_attribute_path.relationship_schema,
+                        peer_id=related_node_id,
+                        attribute_name=None,
+                        attribute_value=None,
+                    )
                 )
             elif schema_attribute_path.attribute_schema:
                 attribute_name = schema_attribute_path.attribute_schema.name
@@ -115,86 +77,79 @@ class NodeGroupedUniquenessConstraint(NodeConstraintInterface):
                     attribute_value = attribute_value.value
                 elif attribute_value is None:
                     attribute_value = NULL_VALUE
-
-
-
+                valued_paths.append(
+                    QueryAttributePathValued(
+                        attribute_name=attribute_name,
                         value=attribute_value,
                     )
                 )
-        return
+        return valued_paths
 
-    async def _get_violations(
+    async def _get_single_schema_violations(
         self,
-
-
-
+        node: Node,
+        node_schema: MainSchemaTypes,
+        filters: list[str],
+        at: Timestamp | None = None,
     ) -> list[UniquenessConstraintViolation]:
-
-
+        schema_branch = self.db.schema.get_schema_branch(name=self.branch.name)
+
+        uniqueness_constraint_paths = node_schema.get_unique_constraint_schema_attribute_paths(
+            schema_branch=schema_branch
        )
-
+
+        violations: list[UniquenessConstraintViolation] = []
         for uniqueness_constraint_path in uniqueness_constraint_paths:
-
-
-
+            valued_paths = await self._get_unique_valued_paths(
+                updated_node=node,
+                path_group=uniqueness_constraint_path.attributes_paths,
+                filters=filters,
             )
 
-
-            if any(sapv.value is None for sapv in schema_attribute_path_values):
+            if not valued_paths:
                 continue
 
-
-
+            # Create the valued query request for this constraint
+            valued_query_request = NodeUniquenessQueryRequestValued(
+                kind=node_schema.kind,
+                unique_valued_paths=valued_paths,
+            )
+
+            # Execute the query
+            query = await UniquenessValidationQuery.init(
+                db=self.db,
+                branch=self.branch,
+                at=at,
+                query_request=valued_query_request,
+                node_ids_to_exclude=[node.get_id()],
+            )
+            await query.execute(db=self.db)
+
+            # Get violation nodes from the query results
+            violation_nodes = query.get_violation_nodes()
+            if not violation_nodes:
                 continue
 
+            # Create violation object
             uniqueness_constraint_fields = []
-            for
-                if
-                    uniqueness_constraint_fields.append(
-                elif
-                    uniqueness_constraint_fields.append(
-
-
-
-
-
-
+            for valued_path in valued_paths:
+                if isinstance(valued_path, QueryRelationshipPathValued):
+                    uniqueness_constraint_fields.append(valued_path.relationship_schema.name)
+                elif isinstance(valued_path, QueryAttributePathValued):
+                    uniqueness_constraint_fields.append(valued_path.attribute_name)
+
+            matching_node_ids = {node_id for node_id, _ in violation_nodes}
+            if matching_node_ids:
+                violations.append(
+                    UniquenessConstraintViolation(
+                        nodes_ids=matching_node_ids,
+                        fields=uniqueness_constraint_fields,
+                        typ=uniqueness_constraint_path.typ,
+                    )
                 )
-            )
 
         return violations
 
-    async def _get_single_schema_violations(
-        self,
-        node: Node,
-        node_schema: MainSchemaTypes,
-        at: Timestamp | None = None,
-        filters: list[str] | None = None,
-    ) -> list[UniquenessConstraintViolation]:
-        schema_branch = self.db.schema.get_schema_branch(name=self.branch.name)
-
-        uniqueness_constraint_paths = node_schema.get_unique_constraint_schema_attribute_paths(
-            schema_branch=schema_branch
-        )
-        query_request = await self._build_query_request(
-            updated_node=node,
-            node_schema=node_schema,
-            uniqueness_constraint_paths=uniqueness_constraint_paths,
-            filters=filters,
-        )
-        if not query_request:
-            return []
-
-        query = await NodeUniqueAttributeConstraintQuery.init(
-            db=self.db, branch=self.branch, at=at, query_request=query_request, min_count_required=0
-        )
-        await query.execute(db=self.db)
-        return await self._get_violations(
-            updated_node=node,
-            uniqueness_constraint_paths=uniqueness_constraint_paths,
-            query_results=query.get_results(),
-        )
-
     async def check(self, node: Node, at: Timestamp | None = None, filters: list[str] | None = None) -> None:
         def _frozen_constraints(schema: MainSchemaTypes) -> frozenset[frozenset[str]]:
             if not schema.uniqueness_constraints:
@@ -218,7 +173,8 @@ class NodeGroupedUniquenessConstraint(NodeConstraintInterface):
         if include_node_schema:
             schemas_to_check.append(node_schema)
 
-        violations = []
+        violations: list[UniquenessConstraintViolation] = []
+
         for schema in schemas_to_check:
            schema_filters = list(filters) if filters is not None else []
            for attr_schema in schema.attributes:
```

(Removed lines shown as a bare `-` carried content that this rendering did not preserve.)
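In net, this refactor replaces the old two-step flow (build one aggregate `NodeUniquenessQueryRequest`, then index the results) with one `UniquenessValidationQuery` per uniqueness constraint, resolved against the concrete values of the node being saved and excluding that node's own id. Below is a minimal, self-contained sketch of that per-constraint flow; the names and the in-memory lookup are illustrative stand-ins for the real graph query, not Infrahub's API:

```python
from dataclasses import dataclass


@dataclass(frozen=True)
class ValuedPath:
    """A constraint field resolved to the value carried by the node being saved."""
    field: str
    value: object


def find_violations(
    updated: dict[str, object],
    constraint_groups: list[list[str]],
    existing: dict[str, dict[str, object]],  # node_id -> fields; stands in for the graph
    filters: list[str] | None = None,
) -> list[tuple[list[str], set[str]]]:
    violations: list[tuple[list[str], set[str]]] = []
    for group in constraint_groups:
        # A constraint is only re-checked when one of its fields was touched.
        if filters and not set(group) & set(filters):
            continue
        valued = [ValuedPath(field, updated.get(field)) for field in group]
        # One query per constraint: which *other* nodes carry exactly these values?
        matches = {
            node_id
            for node_id, fields in existing.items()
            if all(fields.get(vp.field) == vp.value for vp in valued)
        }
        if matches:
            violations.append((group, matches))
    return violations


# Two devices must not share the same (site, position) pair.
existing = {"dev-1": {"site": "sfo1", "position": 12}}
print(find_violations({"site": "sfo1", "position": 12}, [["site", "position"]], existing))
# -> [(['site', 'position'], {'dev-1'})]
```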
infrahub/core/node/resource_manager/ip_address_pool.py
CHANGED

```diff
@@ -18,6 +18,7 @@ from .. import Node
 if TYPE_CHECKING:
     from infrahub.core.branch import Branch
     from infrahub.core.ipam.constants import IPAddressType
+    from infrahub.core.timestamp import Timestamp
     from infrahub.database import InfrahubDatabase
 
 
@@ -30,6 +31,7 @@ class CoreIPAddressPool(Node):
         data: dict[str, Any] | None = None,
         address_type: str | None = None,
         prefixlen: int | None = None,
+        at: Timestamp | None = None,
     ) -> Node:
         # Check if there is already a resource allocated with this identifier
         # if not, pull all existing prefixes and allocated the next available
@@ -63,18 +65,18 @@ class CoreIPAddressPool(Node):
         next_address = await self.get_next(db=db, prefixlen=prefixlen)
 
         target_schema = registry.get_node_schema(name=address_type, branch=branch)
-        node = await Node.init(db=db, schema=target_schema, branch=branch)
+        node = await Node.init(db=db, schema=target_schema, branch=branch, at=at)
         try:
             await node.new(db=db, address=str(next_address), ip_namespace=ip_namespace, **data)
         except ValidationError as exc:
             raise ValueError(f"IPAddressPool: {self.name.value} | {exc!s}") from exc  # type: ignore[attr-defined]
-        await node.save(db=db)
+        await node.save(db=db, at=at)
         reconciler = IpamReconciler(db=db, branch=branch)
         await reconciler.reconcile(ip_value=next_address, namespace=ip_namespace.id, node_uuid=node.get_id())
 
         if identifier:
             query_set = await IPAddressPoolSetReserved.init(
-                db=db, pool_id=self.id, identifier=identifier, address_id=node.id
+                db=db, pool_id=self.id, identifier=identifier, address_id=node.id, at=at
             )
             await query_set.execute(db=db)
 
```
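The common thread in this file (and in the prefix and number pools below) is a new optional `at` timestamp, threaded through `Node.init`, `node.save`, and the `*SetReserved` query so that every write belonging to a single allocation is recorded at the same instant. A small sketch of the pattern, with hypothetical stand-ins for the Infrahub calls:

```python
from datetime import datetime, timezone


# Hypothetical stand-ins for Node.init/save and the reservation query.
def save_node(address: str, at: datetime) -> None:
    print(f"node {address} written at {at.isoformat()}")


def set_reserved(identifier: str, at: datetime) -> None:
    print(f"reservation {identifier} written at {at.isoformat()}")


def get_resource(identifier: str, at: datetime | None = None) -> None:
    # Pin one timestamp for the whole allocation: the node and its
    # reservation marker become visible at exactly the same instant.
    at = at or datetime.now(timezone.utc)
    save_node("10.0.0.1/32", at=at)
    set_reserved(identifier, at=at)


get_resource("device-42")
```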
infrahub/core/node/resource_manager/ip_prefix_pool.py
CHANGED

```diff
@@ -20,6 +20,7 @@ from .. import Node
 if TYPE_CHECKING:
     from infrahub.core.branch import Branch
     from infrahub.core.ipam.constants import IPNetworkType
+    from infrahub.core.timestamp import Timestamp
     from infrahub.database import InfrahubDatabase
 
 
@@ -33,6 +34,7 @@ class CoreIPPrefixPool(Node):
         prefixlen: int | None = None,
         member_type: str | None = None,
         prefix_type: str | None = None,
+        at: Timestamp | None = None,
     ) -> Node:
         # Check if there is already a resource allocated with this identifier
         # if not, pull all existing prefixes and allocated the next available
@@ -68,20 +70,21 @@
         )
 
         member_type = member_type or data.get("member_type", None) or self.default_member_type.value.value  # type: ignore[attr-defined]
+        data["member_type"] = member_type
 
         target_schema = registry.get_node_schema(name=prefix_type, branch=branch)
-        node = await Node.init(db=db, schema=target_schema, branch=branch)
+        node = await Node.init(db=db, schema=target_schema, branch=branch, at=at)
         try:
-            await node.new(db=db, prefix=str(next_prefix), member_type=member_type, ip_namespace=ip_namespace, **data)
+            await node.new(db=db, prefix=str(next_prefix), ip_namespace=ip_namespace, **data)
         except ValidationError as exc:
             raise ValueError(f"IPPrefixPool: {self.name.value} | {exc!s}") from exc  # type: ignore[attr-defined]
-        await node.save(db=db)
+        await node.save(db=db, at=at)
         reconciler = IpamReconciler(db=db, branch=branch)
         await reconciler.reconcile(ip_value=next_prefix, namespace=ip_namespace.id, node_uuid=node.get_id())
 
         if identifier:
             query_set = await PrefixPoolSetReserved.init(
-                db=db, pool_id=self.id, identifier=identifier, prefix_id=node.id
+                db=db, pool_id=self.id, identifier=identifier, prefix_id=node.id, at=at
             )
             await query_set.execute(db=db)
 
```
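Besides the `at` threading, the one behavioral addition here is `data["member_type"] = member_type`: the resolved member type (explicit argument, then caller-supplied `data`, then the pool default) is written back into `data`, so it reaches `node.new(db=db, ..., **data)` through the kwargs expansion instead of an explicit keyword. A sketch of that precedence chain with a hypothetical helper:

```python
def resolve_member_type(explicit: str | None, data: dict[str, str], default: str) -> dict[str, str]:
    # Precedence: explicit argument > caller-supplied data > pool default.
    # The winner is written back into data so node.new(**data) receives it.
    data["member_type"] = explicit or data.get("member_type") or default
    return data


print(resolve_member_type(None, {}, "prefix"))                              # {'member_type': 'prefix'}
print(resolve_member_type("address", {"member_type": "prefix"}, "prefix"))  # {'member_type': 'address'}
```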
infrahub/core/node/resource_manager/number_pool.py
CHANGED

```diff
@@ -12,6 +12,7 @@ from .. import Node
 if TYPE_CHECKING:
     from infrahub.core.attribute import BaseAttribute
     from infrahub.core.branch import Branch
+    from infrahub.core.timestamp import Timestamp
     from infrahub.database import InfrahubDatabase
 
 
@@ -41,6 +42,7 @@ class CoreNumberPool(Node):
         node: Node,
         attribute: BaseAttribute,
         identifier: str | None = None,
+        at: Timestamp | None = None,
     ) -> int:
         identifier = identifier or node.get_id()
         # Check if there is already a resource allocated with this identifier
@@ -56,7 +58,7 @@
             number = await self.get_next(db=db, branch=branch, attribute=attribute)
 
         query_set = await NumberPoolSetReserved.init(
-            db=db, pool_id=self.get_id(), identifier=identifier, reserved=number
+            db=db, pool_id=self.get_id(), identifier=identifier, reserved=number, at=at
         )
         await query_set.execute(db=db)
         return number
```
infrahub/core/node/standard.py
CHANGED

```diff
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 import inspect
+from enum import Enum
 from typing import TYPE_CHECKING, Any, Optional, Union, get_args, get_origin
 from uuid import UUID
 
@@ -191,6 +192,9 @@ class StandardNode(BaseModel):
                 continue
 
             attr_value = getattr(self, attr_name)
+            if isinstance(attr_value, Enum):
+                attr_value = attr_value.value
+
             field_type = self.guess_field_type(field)
 
             if attr_value is None:
```
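The `StandardNode` change unwraps `Enum`-typed attributes to their raw `.value` before the value is handed to the database layer, so an enum field is persisted as its primitive payload rather than as an `Enum` instance. A minimal illustration of the same guard in isolation (the `BranchStatus` enum is invented for the example):

```python
from enum import Enum


class BranchStatus(Enum):
    OPEN = "open"
    MERGED = "merged"


def to_db_value(attr_value: object) -> object:
    # Mirrors the new StandardNode behaviour: persist the Enum's raw value,
    # never the Enum instance itself.
    if isinstance(attr_value, Enum):
        return attr_value.value
    return attr_value


assert to_db_value(BranchStatus.MERGED) == "merged"
assert to_db_value("already-a-string") == "already-a-string"
```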
infrahub/core/query/branch.py
CHANGED

```diff
@@ -3,43 +3,12 @@ from __future__ import annotations
 from typing import TYPE_CHECKING, Any
 
 from infrahub import config
-from infrahub.core.constants import RelationshipStatus
 from infrahub.core.query import Query, QueryType
 
 if TYPE_CHECKING:
     from infrahub.database import InfrahubDatabase
 
 
-class AddNodeToBranch(Query):
-    name: str = "node_add_to_branch"
-    insert_return: bool = False
-
-    type: QueryType = QueryType.WRITE
-
-    def __init__(self, node_id: int, **kwargs: Any):
-        self.node_id = node_id
-        super().__init__(**kwargs)
-
-    async def query_init(self, db: InfrahubDatabase, **kwargs: Any) -> None:  # noqa: ARG002
-        query = """
-        MATCH (root:Root)
-        MATCH (d) WHERE %(id_func)s(d) = $node_id
-        WITH root,d
-        CREATE (d)-[r:IS_PART_OF { branch: $branch, branch_level: $branch_level, from: $now, status: $status }]->(root)
-        RETURN %(id_func)s(r)
-        """ % {
-            "id_func": db.get_id_function_name(),
-        }
-
-        self.params["node_id"] = db.to_database_id(self.node_id)
-        self.params["now"] = self.at.to_string()
-        self.params["branch"] = self.branch.name
-        self.params["branch_level"] = self.branch.hierarchy_level
-        self.params["status"] = RelationshipStatus.ACTIVE.value
-
-        self.add_to_query(query)
-
-
 class DeleteBranchRelationshipsQuery(Query):
     name: str = "delete_branch_relationships"
     insert_return: bool = False
@@ -52,31 +21,31 @@ class DeleteBranchRelationshipsQuery(Query):
 
     async def query_init(self, db: InfrahubDatabase, **kwargs: Any) -> None:  # noqa: ARG002
         query = """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        // delete all relationships on this branch
+        MATCH (s)-[r1]-(d)
+        WHERE r1.branch = $branch_name
+        CALL (r1) {
+            DELETE r1
+        } IN TRANSACTIONS
+
+        // check for any orphaned Node vertices and delete them
+        WITH collect(DISTINCT s.uuid) + collect(DISTINCT d.uuid) AS nodes_uuids
+        MATCH (s2:Node)-[r2]-(d2)
+        WHERE NOT exists((s2)-[:IS_PART_OF]-(:Root))
+        AND s2.uuid IN nodes_uuids
+        CALL (r2) {
+            DELETE r2
+        } IN TRANSACTIONS
+
+        // reduce results to a single row
+        WITH 1 AS one LIMIT 1
+
+        // find any orphaned vertices and delete them
+        MATCH (n)
+        WHERE NOT exists((n)--())
+        CALL (n) {
+            DELETE n
+        } IN TRANSACTIONS
         """
         self.params["branch_name"] = self.branch_name
         self.add_to_query(query)
```

(The 25 removed lines of the previous Cypher query are shown as bare `-` markers; their content was not preserved in this rendering.)