infrahub-server 1.3.0a0__py3-none-any.whl → 1.3.0b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- infrahub/core/attribute.py +3 -3
- infrahub/core/constants/__init__.py +5 -0
- infrahub/core/constants/infrahubkind.py +2 -0
- infrahub/core/migrations/query/attribute_rename.py +2 -4
- infrahub/core/migrations/query/delete_element_in_schema.py +16 -11
- infrahub/core/migrations/query/node_duplicate.py +16 -15
- infrahub/core/migrations/query/relationship_duplicate.py +16 -11
- infrahub/core/migrations/schema/node_attribute_remove.py +1 -2
- infrahub/core/migrations/schema/node_remove.py +16 -13
- infrahub/core/node/__init__.py +72 -14
- infrahub/core/node/resource_manager/ip_address_pool.py +6 -2
- infrahub/core/node/resource_manager/ip_prefix_pool.py +6 -2
- infrahub/core/node/resource_manager/number_pool.py +31 -5
- infrahub/core/node/standard.py +6 -1
- infrahub/core/protocols.py +9 -0
- infrahub/core/query/relationship.py +2 -4
- infrahub/core/schema/attribute_parameters.py +129 -5
- infrahub/core/schema/attribute_schema.py +38 -10
- infrahub/core/schema/definitions/core/__init__.py +16 -2
- infrahub/core/schema/definitions/core/group.py +45 -0
- infrahub/core/schema/definitions/core/resource_pool.py +20 -0
- infrahub/core/schema/definitions/internal.py +16 -3
- infrahub/core/schema/generated/attribute_schema.py +12 -5
- infrahub/core/schema/manager.py +3 -0
- infrahub/core/schema/schema_branch.py +55 -0
- infrahub/core/validators/__init__.py +8 -0
- infrahub/core/validators/attribute/choices.py +0 -1
- infrahub/core/validators/attribute/enum.py +0 -1
- infrahub/core/validators/attribute/kind.py +0 -1
- infrahub/core/validators/attribute/length.py +0 -1
- infrahub/core/validators/attribute/min_max.py +118 -0
- infrahub/core/validators/attribute/number_pool.py +106 -0
- infrahub/core/validators/attribute/optional.py +0 -2
- infrahub/core/validators/attribute/regex.py +0 -1
- infrahub/core/validators/enum.py +5 -0
- infrahub/database/__init__.py +15 -3
- infrahub/git/base.py +5 -3
- infrahub/git/integrator.py +102 -3
- infrahub/graphql/mutations/resource_manager.py +62 -6
- infrahub/graphql/queries/resource_manager.py +7 -1
- infrahub/graphql/queries/task.py +10 -0
- infrahub/graphql/types/task_log.py +3 -2
- infrahub/menu/menu.py +3 -3
- infrahub/pools/number.py +5 -3
- infrahub/task_manager/task.py +44 -4
- infrahub/types.py +6 -0
- infrahub_sdk/client.py +43 -10
- infrahub_sdk/node/__init__.py +39 -0
- infrahub_sdk/node/attribute.py +122 -0
- infrahub_sdk/node/constants.py +21 -0
- infrahub_sdk/{node.py → node/node.py} +50 -749
- infrahub_sdk/node/parsers.py +15 -0
- infrahub_sdk/node/property.py +24 -0
- infrahub_sdk/node/related_node.py +266 -0
- infrahub_sdk/node/relationship.py +302 -0
- infrahub_sdk/protocols.py +112 -0
- infrahub_sdk/protocols_base.py +34 -2
- infrahub_sdk/query_groups.py +13 -2
- infrahub_sdk/schema/main.py +1 -0
- infrahub_sdk/schema/repository.py +16 -0
- infrahub_sdk/spec/object.py +1 -1
- infrahub_sdk/store.py +1 -1
- infrahub_sdk/testing/schemas/car_person.py +1 -0
- {infrahub_server-1.3.0a0.dist-info → infrahub_server-1.3.0b1.dist-info}/METADATA +3 -3
- {infrahub_server-1.3.0a0.dist-info → infrahub_server-1.3.0b1.dist-info}/RECORD +68 -59
- {infrahub_server-1.3.0a0.dist-info → infrahub_server-1.3.0b1.dist-info}/WHEEL +1 -1
- {infrahub_server-1.3.0a0.dist-info → infrahub_server-1.3.0b1.dist-info}/LICENSE.txt +0 -0
- {infrahub_server-1.3.0a0.dist-info → infrahub_server-1.3.0b1.dist-info}/entry_points.txt +0 -0
infrahub/core/attribute.py
CHANGED
@@ -93,8 +93,8 @@ class BaseAttribute(FlagPropertyMixin, NodePropertyMixin):
         updated_at: Timestamp | str | None = None,
         is_default: bool = False,
         is_from_profile: bool = False,
-        **kwargs,
-    ):
+        **kwargs: dict[str, Any],
+    ) -> None:
         self.id = id
         self.db_id = db_id

@@ -169,7 +169,7 @@ class BaseAttribute(FlagPropertyMixin, NodePropertyMixin):
         return self.branch

     @classmethod
-    def __init_subclass__(cls, **kwargs) -> None:
+    def __init_subclass__(cls, **kwargs: dict[str, Any]) -> None:
         super().__init_subclass__(**kwargs)
         registry.attribute[cls.__name__] = cls

infrahub/core/constants/__init__.py
CHANGED
@@ -146,6 +146,11 @@ class AllowOverrideType(InfrahubStringEnum):
     ANY = "any"


+class RepositoryObjects(InfrahubStringEnum):
+    OBJECT = "object"
+    MENU = "menu"
+
+
 class ContentType(InfrahubStringEnum):
     APPLICATION_JSON = "application/json"
     APPLICATION_YAML = "application/yaml"
infrahub/core/constants/infrahubkind.py
CHANGED
@@ -58,6 +58,7 @@ PROFILE = "CoreProfile"
 PROPOSEDCHANGE = "CoreProposedChange"
 REFRESHTOKEN = "InternalRefreshToken"
 REPOSITORY = "CoreRepository"
+REPOSITORYGROUP = "CoreRepositoryGroup"
 RESOURCEPOOL = "CoreResourcePool"
 GENERICREPOSITORY = "CoreGenericRepository"
 READONLYREPOSITORY = "CoreReadOnlyRepository"

@@ -78,3 +79,4 @@ TRIGGERRULE = "CoreTriggerRule"
 USERVALIDATOR = "CoreUserValidator"
 VALIDATOR = "CoreValidator"
 WEBHOOK = "CoreWebhook"
+WEIGHTED_POOL_RESOURCE = "CoreWeightedPoolResource"
infrahub/core/migrations/query/attribute_rename.py
CHANGED
@@ -55,7 +55,6 @@ class AttributeRenameQuery(Query):
     @staticmethod
     def _render_sub_query_per_rel_type_update_active(rel_type: str, rel_def: FieldInfo) -> str:
         subquery = [
-            "WITH peer_node, rb, active_attr",
             "WITH peer_node, rb, active_attr",
             f'WHERE type(rb) = "{rel_type}"',
         ]

@@ -72,7 +71,6 @@ class AttributeRenameQuery(Query):
     @staticmethod
     def _render_sub_query_per_rel_type_create_new(rel_type: str, rel_def: FieldInfo) -> str:
         subquery = [
-            "WITH peer_node, rb, active_attr, new_attr",
             "WITH peer_node, rb, active_attr, new_attr",
             f'WHERE type(rb) = "{rel_type}"',
         ]

@@ -158,7 +156,7 @@ class AttributeRenameQuery(Query):
        }
        WITH a1 as active_attr, r1 as rb, p1 as peer_node, new_attr
        WHERE rb.status = "active"
-       CALL {
+       CALL (peer_node, rb, active_attr, new_attr){
           %(sub_query_create_all)s
        }
        WITH p2 as peer_node, rb, new_attr, active_attr

@@ -167,7 +165,7 @@ class AttributeRenameQuery(Query):

         if not (self.branch.is_default or self.branch.is_global):
             query = """
-            CALL {
+            CALL (peer_node, rb, active_attr) {
               %(sub_query_update_all)s
            }
            WITH p2 as peer_node, rb, new_attr

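Note: the same two-part change recurs across all of the migration query files that follow (delete_element_in_schema.py, node_duplicate.py, relationship_duplicate.py, node_attribute_remove.py, node_remove.py). The duplicated importing "WITH ..." line at the top of each rendered sub-query is dropped, and the plain "CALL { ... }" subqueries become variable-scoped "CALL (vars) { ... }" subqueries, with the variable list rendered alongside the sub-query body. The sketch below is illustrative only (the variable names are taken from the diff, the surrounding code is not) and assumes the scoped-CALL form is meant to replace the importing WITH:

# Sketch: old importing-WITH style vs. the new variable-scoped CALL style
# used throughout these migration queries (illustrative, not package code).
sub_query = 'MATCH (peer_node) WHERE type(rb) = "HAS_VALUE" RETURN peer_node as p2'
sub_query_args = "peer_node, rb, active_attr"

old_query = """
CALL {
    WITH peer_node, rb, active_attr
    %(sub_query)s
}
""" % {"sub_query": sub_query}

new_query = """
CALL (%(sub_query_args)s) {
    %(sub_query)s
}
""" % {"sub_query": sub_query, "sub_query_args": sub_query_args}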
infrahub/core/migrations/query/delete_element_in_schema.py
CHANGED
@@ -55,7 +55,6 @@ class DeleteElementInSchemaQuery(Query):
     @staticmethod
     def _render_sub_query_per_rel_type(rel_name: str, rel_type: str, direction: GraphRelDirection) -> str:
         subquery = [
-            f"WITH peer_node, {rel_name}, element_to_delete",
             f"WITH peer_node, {rel_name}, element_to_delete",
             f'WHERE type({rel_name}) = "{rel_type}"',
         ]

@@ -67,28 +66,32 @@ class DeleteElementInSchemaQuery(Query):
         return "\n".join(subquery)

     @classmethod
-    def _render_sub_query_out(cls) -> str:
+    def _render_sub_query_out(cls) -> tuple[str, str]:
+        rel_name = "rel_outband"
+        sub_query_out_args = f"peer_node, {rel_name}, element_to_delete"
         sub_queries_out = [
             cls._render_sub_query_per_rel_type(
-                rel_name="rel_outband", rel_type=rel_type, direction=GraphRelDirection.OUTBOUND
+                rel_name=rel_name, rel_type=rel_type, direction=GraphRelDirection.OUTBOUND
             )
             for rel_type, rel_def in GraphNodeRelationships.model_fields.items()
             if rel_def.default.direction in [GraphRelDirection.OUTBOUND, GraphRelDirection.EITHER]
         ]
         sub_query_out = "\nUNION\n".join(sub_queries_out)
-        return sub_query_out
+        return sub_query_out, sub_query_out_args

     @classmethod
-    def _render_sub_query_in(cls) -> str:
+    def _render_sub_query_in(cls) -> tuple[str, str]:
+        rel_name = "rel_inband"
+        sub_query_in_args = f"peer_node, {rel_name}, element_to_delete"
         sub_queries_in = [
             cls._render_sub_query_per_rel_type(
-                rel_name="rel_inband", rel_type=rel_type, direction=GraphRelDirection.INBOUND
+                rel_name=rel_name, rel_type=rel_type, direction=GraphRelDirection.INBOUND
             )
             for rel_type, rel_def in GraphNodeRelationships.model_fields.items()
             if rel_def.default.direction in [GraphRelDirection.INBOUND, GraphRelDirection.EITHER]
         ]
         sub_query_in = "\nUNION\n".join(sub_queries_in)
-        return sub_query_in
+        return sub_query_in, sub_query_in_args

     async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:  # noqa: ARG002
         branch_filter, branch_params = self.branch.get_query_filter_path(at=self.at.to_string())

@@ -108,8 +111,8 @@ class DeleteElementInSchemaQuery(Query):
             "from": self.at.to_string(),
         }

-        sub_query_out = self._render_sub_query_out()
-        sub_query_in = self._render_sub_query_in()
+        sub_query_out, sub_query_out_args = self._render_sub_query_out()
+        sub_query_in, sub_query_in_args = self._render_sub_query_in()

         self.add_to_query(self.render_match())
         self.add_to_query(self.render_where())

@@ -138,7 +141,7 @@ class DeleteElementInSchemaQuery(Query):
        }
        WITH n1 as element_to_delete, rel_outband1 as rel_outband, p1 as peer_node
        WHERE rel_outband.status = "active"
-       CALL {
+       CALL (%(sub_query_out_args)s) {
           %(sub_query_out)s
        }
        WITH p2 as peer_node, rel_outband, element_to_delete

@@ -157,7 +160,7 @@ class DeleteElementInSchemaQuery(Query):
        }
        WITH n1 as element_to_delete, rel_inband1 as rel_inband, p1 as peer_node
        WHERE rel_inband.status = "active"
-       CALL {
+       CALL (%(sub_query_in_args)s) {
           %(sub_query_in)s
        }
        WITH p2 as peer_node, rel_inband, element_to_delete

@@ -169,5 +172,7 @@ class DeleteElementInSchemaQuery(Query):
             "branch_filter": branch_filter,
             "sub_query_out": sub_query_out,
             "sub_query_in": sub_query_in,
+            "sub_query_out_args": sub_query_out_args,
+            "sub_query_in_args": sub_query_in_args,
         }
         self.add_to_query(query)

infrahub/core/migrations/query/node_duplicate.py
CHANGED
@@ -47,7 +47,6 @@ class NodeDuplicateQuery(Query):
     @staticmethod
     def _render_sub_query_per_rel_type(rel_name: str, rel_type: str, rel_dir: GraphRelDirection) -> str:
         subquery = [
-            f"WITH peer_node, {rel_name}, active_node, new_node",
             f"WITH peer_node, {rel_name}, active_node, new_node",
             f'WHERE type({rel_name}) = "{rel_type}"',
         ]

@@ -81,28 +80,28 @@ class NodeDuplicateQuery(Query):
         return "\n".join(subquery)

     @classmethod
-    def _render_sub_query_out(cls) -> str:
+    def _render_sub_query_out(cls) -> tuple[str, str]:
+        rel_name = "rel_outband"
+        sub_query_out_args = f"peer_node, {rel_name}, active_node, new_node"
         sub_queries_out = [
-            cls._render_sub_query_per_rel_type(
-                rel_name="rel_outband", rel_type=rel_type, rel_dir=GraphRelDirection.OUTBOUND
-            )
+            cls._render_sub_query_per_rel_type(rel_name=rel_name, rel_type=rel_type, rel_dir=GraphRelDirection.OUTBOUND)
             for rel_type, field_info in GraphNodeRelationships.model_fields.items()
             if field_info.default.direction in (GraphRelDirection.OUTBOUND, GraphRelDirection.EITHER)
         ]
         sub_query_out = "\nUNION\n".join(sub_queries_out)
-        return sub_query_out
+        return sub_query_out, sub_query_out_args

     @classmethod
-    def _render_sub_query_in(cls) -> str:
+    def _render_sub_query_in(cls) -> tuple[str, str]:
+        rel_name = "rel_inband"
+        sub_query_in_args = f"peer_node, {rel_name}, active_node, new_node"
         sub_queries_in = [
-            cls._render_sub_query_per_rel_type(
-                rel_name="rel_inband", rel_type=rel_type, rel_dir=GraphRelDirection.INBOUND
-            )
+            cls._render_sub_query_per_rel_type(rel_name=rel_name, rel_type=rel_type, rel_dir=GraphRelDirection.INBOUND)
             for rel_type, field_info in GraphNodeRelationships.model_fields.items()
             if field_info.default.direction in (GraphRelDirection.INBOUND, GraphRelDirection.EITHER)
         ]
         sub_query_in = "\nUNION\n".join(sub_queries_in)
-        return sub_query_in
+        return sub_query_in, sub_query_in_args

     async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:  # noqa: ARG002
         branch_filter, branch_params = self.branch.get_query_filter_path(at=self.at.to_string())

@@ -126,8 +125,8 @@ class NodeDuplicateQuery(Query):
             "from": self.at.to_string(),
         }

-        sub_query_out = self._render_sub_query_out()
-        sub_query_in = self._render_sub_query_in()
+        sub_query_out, sub_query_out_args = self._render_sub_query_out()
+        sub_query_in, sub_query_in_args = self._render_sub_query_in()

         self.add_to_query(self.render_match())

@@ -155,7 +154,7 @@ class NodeDuplicateQuery(Query):
        }
        WITH n1 as active_node, rel_outband1 as rel_outband, p1 as peer_node, new_node
        WHERE rel_outband.status = "active" AND rel_outband.to IS NULL
-       CALL {
+       CALL (%(sub_query_out_args)s) {
           %(sub_query_out)s
        }
        WITH p2 as peer_node, rel_outband, active_node, new_node

@@ -174,7 +173,7 @@ class NodeDuplicateQuery(Query):
        }
        WITH n1 as active_node, rel_inband1 as rel_inband, p1 as peer_node, new_node
        WHERE rel_inband.status = "active" AND rel_inband.to IS NULL
-       CALL {
+       CALL (%(sub_query_in_args)s) {
           %(sub_query_in)s
        }
        WITH p2 as peer_node, rel_inband, active_node, new_node

@@ -188,5 +187,7 @@ class NodeDuplicateQuery(Query):
             "labels": ":".join(self.new_node.labels),
             "sub_query_out": sub_query_out,
             "sub_query_in": sub_query_in,
+            "sub_query_out_args": sub_query_out_args,
+            "sub_query_in_args": sub_query_in_args,
         }
         self.add_to_query(query)

infrahub/core/migrations/query/relationship_duplicate.py
CHANGED
@@ -47,7 +47,6 @@ class RelationshipDuplicateQuery(Query):
     @staticmethod
     def _render_sub_query_per_rel_type(rel_name: str, rel_type: str, direction: GraphRelDirection) -> str:
         subquery = [
-            f"WITH peer_node, {rel_name}, active_rel, new_rel",
             f"WITH peer_node, {rel_name}, active_rel, new_rel",
             f'WHERE type({rel_name}) = "{rel_type}"',
         ]

@@ -61,28 +60,32 @@ class RelationshipDuplicateQuery(Query):
         return "\n".join(subquery)

     @classmethod
-    def _render_sub_query_out(cls) -> str:
+    def _render_sub_query_out(cls) -> tuple[str, str]:
+        rel_name = "rel_outband"
+        sub_query_out_args = f"peer_node, {rel_name}, active_rel, new_rel"
         sub_queries_out = [
             cls._render_sub_query_per_rel_type(
-                rel_name="rel_outband", rel_type=rel_type, direction=GraphRelDirection.OUTBOUND
+                rel_name=rel_name, rel_type=rel_type, direction=GraphRelDirection.OUTBOUND
             )
             for rel_type, rel_def in GraphRelationshipRelationships.model_fields.items()
             if rel_def.default.direction in [GraphRelDirection.OUTBOUND, GraphRelDirection.EITHER]
         ]
         sub_query_out = "\nUNION\n".join(sub_queries_out)
-        return sub_query_out
+        return sub_query_out, sub_query_out_args

     @classmethod
-    def _render_sub_query_in(cls) -> str:
+    def _render_sub_query_in(cls) -> tuple[str, str]:
+        rel_name = "rel_inband"
+        sub_query_in_args = f"peer_node, {rel_name}, active_rel, new_rel"
         sub_queries_in = [
             cls._render_sub_query_per_rel_type(
-                rel_name="rel_inband", rel_type=rel_type, direction=GraphRelDirection.INBOUND
+                rel_name=rel_name, rel_type=rel_type, direction=GraphRelDirection.INBOUND
             )
             for rel_type, rel_def in GraphRelationshipRelationships.model_fields.items()
             if rel_def.default.direction in [GraphRelDirection.INBOUND, GraphRelDirection.EITHER]
         ]
         sub_query_in = "\nUNION\n".join(sub_queries_in)
-        return sub_query_in
+        return sub_query_in, sub_query_in_args

     async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:  # noqa: ARG002
         branch_filter, branch_params = self.branch.get_query_filter_path(at=self.at.to_string())

@@ -109,8 +112,8 @@ class RelationshipDuplicateQuery(Query):
             "from": self.at.to_string(),
         }

-        sub_query_out = self._render_sub_query_out()
-        sub_query_in = self._render_sub_query_in()
+        sub_query_out, sub_query_out_args = self._render_sub_query_out()
+        sub_query_in, sub_query_in_args = self._render_sub_query_in()

         self.add_to_query(self.render_match())

@@ -138,7 +141,7 @@ class RelationshipDuplicateQuery(Query):
        }
        WITH n1 as active_rel, rel_inband1 as rel_inband, p1 as peer_node, new_rel
        WHERE rel_inband.status = "active"
-       CALL {
+       CALL (%(sub_query_in_args)s) {
           %(sub_query_in)s
        }
        WITH p2 as peer_node, rel_inband, active_rel, new_rel

@@ -157,7 +160,7 @@ class RelationshipDuplicateQuery(Query):
        }
        WITH n1 as active_rel, rel_outband1 as rel_outband, p1 as peer_node, new_rel
        WHERE rel_outband.status = "active"
-       CALL {
+       CALL (%(sub_query_out_args)s) {
           %(sub_query_out)s
        }
        WITH p2 as peer_node, rel_outband, active_rel, new_rel

@@ -169,5 +172,7 @@ class RelationshipDuplicateQuery(Query):
             "branch_filter": branch_filter,
             "sub_query_out": sub_query_out,
             "sub_query_in": sub_query_in,
+            "sub_query_in_args": sub_query_in_args,
+            "sub_query_out_args": sub_query_out_args,
         }
         self.add_to_query(query)

infrahub/core/migrations/schema/node_attribute_remove.py
CHANGED
@@ -50,7 +50,6 @@ class NodeAttributeRemoveMigrationQuery01(AttributeMigrationQuery):

        def render_sub_query_per_rel_type(rel_type: str, rel_def: FieldInfo) -> str:
            subquery = [
-               "WITH peer_node, rb, active_attr",
                "WITH peer_node, rb, active_attr",
                f'WHERE type(rb) = "{rel_type}"',
            ]

@@ -105,7 +104,7 @@ class NodeAttributeRemoveMigrationQuery01(AttributeMigrationQuery):
        }
        WITH a1 as active_attr, r1 as rb, p1 as peer_node
        WHERE rb.status = "active"
-       CALL {
+       CALL (peer_node, rb, active_attr) {
           %(sub_query_all)s
        }
        WITH p2 as peer_node, rb, active_attr

infrahub/core/migrations/schema/node_remove.py
CHANGED
@@ -21,7 +21,6 @@ class NodeRemoveMigrationBaseQuery(MigrationQuery):
         rel_def: FieldInfo,
     ) -> str:
         subquery = [
-            f"WITH peer_node, {rel_name}, active_node",
             f"WITH peer_node, {rel_name}, active_node",
             f'WHERE type({rel_name}) = "{rel_type}"',
         ]

@@ -90,7 +89,7 @@ class NodeRemoveMigrationQueryIn(NodeRemoveMigrationBaseQuery):
     insert_return: bool = False

     def render_node_remove_query(self, branch_filter: str) -> str:
-        sub_query = self.render_sub_query_in()
+        sub_query, sub_query_args = self.render_sub_query_in()
         query = """
        // Process Inbound Relationship
        WITH active_node

@@ -104,27 +103,29 @@ class NodeRemoveMigrationQueryIn(NodeRemoveMigrationBaseQuery):
        }
        WITH n1 as active_node, rel_inband1 as rel_inband, p1 as peer_node
        WHERE rel_inband.status = "active"
-       CALL {
+       CALL (%(sub_query_args)s) {
           %(sub_query)s
        }
        WITH p2 as peer_node, rel_inband, active_node
        FOREACH (i in CASE WHEN rel_inband.branch IN ["-global-", $branch] THEN [1] ELSE [] END |
           SET rel_inband.to = $current_time
        )
-        """ % {"sub_query": sub_query, "branch_filter": branch_filter}
+        """ % {"sub_query": sub_query, "sub_query_args": sub_query_args, "branch_filter": branch_filter}
         return query

-    def render_sub_query_in(self) -> str:
+    def render_sub_query_in(self) -> tuple[str, str]:
+        rel_name = "rel_inband"
+        sub_query_in_args = f"peer_node, {rel_name}, active_node"
         sub_queries_in = [
             self.render_sub_query_per_rel_type(
-                rel_name="rel_inband",
+                rel_name=rel_name,
                 rel_type=rel_type,
                 rel_def=rel_def,
             )
             for rel_type, rel_def in GraphNodeRelationships.model_fields.items()
         ]
         sub_query_in = "\nUNION\n".join(sub_queries_in)
-        return sub_query_in
+        return sub_query_in, sub_query_in_args

     def get_nbr_migrations_executed(self) -> int:
         return 0

@@ -135,7 +136,7 @@ class NodeRemoveMigrationQueryOut(NodeRemoveMigrationBaseQuery):
     insert_return: bool = False

     def render_node_remove_query(self, branch_filter: str) -> str:
-        sub_query = self.render_sub_query_out()
+        sub_query, sub_query_args = self.render_sub_query_out()
         query = """
        // Process Outbound Relationship
        WITH active_node

@@ -149,27 +150,29 @@ class NodeRemoveMigrationQueryOut(NodeRemoveMigrationBaseQuery):
        }
        WITH n1 as active_node, rel_outband1 as rel_outband, p1 as peer_node
        WHERE rel_outband.status = "active"
-       CALL {
+       CALL (%(sub_query_args)s) {
           %(sub_query)s
        }
        FOREACH (i in CASE WHEN rel_outband.branch IN ["-global-", $branch] THEN [1] ELSE [] END |
           SET rel_outband.to = $current_time
        )
-        """ % {"sub_query": sub_query, "branch_filter": branch_filter}
+        """ % {"sub_query": sub_query, "sub_query_args": sub_query_args, "branch_filter": branch_filter}

         return query

-    def render_sub_query_out(self) -> str:
+    def render_sub_query_out(self) -> tuple[str, str]:
+        rel_name = "rel_outband"
+        sub_query_out_args = f"peer_node, {rel_name}, active_node"
         sub_queries_out = [
             self.render_sub_query_per_rel_type(
-                rel_name="rel_outband",
+                rel_name=rel_name,
                 rel_type=rel_type,
                 rel_def=rel_def,
             )
             for rel_type, rel_def in GraphNodeRelationships.model_fields.items()
         ]
         sub_query_out = "\nUNION\n".join(sub_queries_out)
-        return sub_query_out
+        return sub_query_out, sub_query_out_args

     def get_nbr_migrations_executed(self) -> int:
         return self.num_of_results
infrahub/core/node/__init__.py
CHANGED
@@ -30,6 +30,7 @@ from infrahub.core.schema import (
     RelationshipSchema,
     TemplateSchema,
 )
+from infrahub.core.schema.attribute_parameters import NumberPoolParameters
 from infrahub.core.timestamp import Timestamp
 from infrahub.exceptions import InitializationError, NodeNotFoundError, PoolExhaustedError, ValidationError
 from infrahub.types import ATTRIBUTE_TYPES

@@ -254,6 +255,12 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
         within the create code.
         """

+        number_pool_parameters: NumberPoolParameters | None = None
+        if attribute.schema.kind == "NumberPool" and isinstance(attribute.schema.parameters, NumberPoolParameters):
+            attribute.from_pool = {"id": attribute.schema.parameters.number_pool_id}
+            attribute.is_default = False
+            number_pool_parameters = attribute.schema.parameters
+
         if not attribute.from_pool:
             return

@@ -262,19 +269,25 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
                 db=db, id=attribute.from_pool["id"], kind=CoreNumberPool
             )
         except NodeNotFoundError:
-            errors.append(
-                ValidationError(
-                    {f"{attribute.name}.from_pool": f"The pool requested {attribute.from_pool} was not found."}
+            if number_pool_parameters:
+                number_pool = await self._create_number_pool(
+                    db=db, attribute=attribute, number_pool_parameters=number_pool_parameters
                 )
-            )
-            return
+
+            else:
+                errors.append(
+                    ValidationError(
+                        {f"{attribute.name}.from_pool": f"The pool requested {attribute.from_pool} was not found."}
+                    )
+                )
+                return

         if (
             number_pool.node.value in [self._schema.kind] + self._schema.inherit_from
             and number_pool.node_attribute.value == attribute.name
         ):
             try:
-                next_free = await number_pool.get_resource(db=db, branch=self._branch, node=self)
+                next_free = await number_pool.get_resource(db=db, branch=self._branch, node=self, attribute=attribute)
             except PoolExhaustedError:
                 errors.append(
                     ValidationError({f"{attribute.name}.from_pool": f"The pool {number_pool.node.value} is exhausted."})

@@ -292,6 +305,35 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
                 )
             )

+    async def _create_number_pool(
+        self, db: InfrahubDatabase, attribute: BaseAttribute, number_pool_parameters: NumberPoolParameters
+    ) -> CoreNumberPool:
+        schema = db.schema.get_node_schema(name="CoreNumberPool", duplicate=False)
+
+        pool_node = self._schema.kind
+        schema_attribute = self._schema.get_attribute(attribute.schema.name)
+        if schema_attribute.inherited:
+            for generic_name in self._schema.inherit_from:
+                generic_node = db.schema.get_generic_schema(name=generic_name, duplicate=False)
+                if attribute.schema.name in generic_node.attribute_names:
+                    pool_node = generic_node.kind
+                    break
+
+        number_pool = await Node.init(db=db, schema=schema, branch=self._branch)
+        await number_pool.new(
+            db=db,
+            id=number_pool_parameters.number_pool_id,
+            name=f"{pool_node}.{attribute.schema.name} [{number_pool_parameters.number_pool_id}]",
+            node=pool_node,
+            node_attribute=attribute.schema.name,
+            start_range=number_pool_parameters.start_range,
+            end_range=number_pool_parameters.end_range,
+        )
+        await number_pool.save(db=db)
+        # Do a lookup of the number pool to get the correct mapped type from the registry
+        # without this we don't get access to the .get_resource() method.
+        return await registry.manager.get_one_by_id_or_default_filter(db=db, id=number_pool.id, kind=CoreNumberPool)
+
     async def handle_object_template(self, fields: dict, db: InfrahubDatabase, errors: list) -> None:
         """Fill the `fields` parameters with values from an object template if one is in use."""
         object_template_field = fields.get(OBJECT_TEMPLATE_RELATIONSHIP_NAME)

@@ -376,6 +418,9 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
                 self._computed_jinja2_attributes.append(mandatory_attr)
                 continue

+            if mandatory_attribute.kind == "NumberPool":
+                continue
+
             errors.append(
                 ValidationError({mandatory_attr: f"{mandatory_attr} is mandatory for {self.get_kind()}"})
             )

@@ -392,6 +437,21 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
         # -------------------------------------------
         # Generate Attribute and Relationship and assign them
         # -------------------------------------------
+        errors.extend(await self._process_fields_relationships(fields=fields, db=db))
+        errors.extend(await self._process_fields_attributes(fields=fields, db=db))
+
+        if errors:
+            raise ValidationError(errors)
+
+        # Check if any post processor have been defined
+        # A processor can be used for example to assigne a default value
+        for name in self._attributes + self._relationships:
+            if hasattr(self, f"process_{name}"):
+                await getattr(self, f"process_{name}")(db=db)
+
+    async def _process_fields_relationships(self, fields: dict, db: InfrahubDatabase) -> list[ValidationError]:
+        errors: list[ValidationError] = []
+
         for rel_schema in self._schema.relationships:
             self._relationships.append(rel_schema.name)

@@ -413,6 +473,11 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
             except ValidationError as exc:
                 errors.append(exc)

+        return errors
+
+    async def _process_fields_attributes(self, fields: dict, db: InfrahubDatabase) -> list[ValidationError]:
+        errors: list[ValidationError] = []
+
         for attr_schema in self._schema.attributes:
             self._attributes.append(attr_schema.name)
             if not self._existing and attr_schema.name in self._computed_jinja2_attributes:

@@ -441,14 +506,7 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
             except ValidationError as exc:
                 errors.append(exc)

-        if errors:
-            raise ValidationError(errors)
-
-        # Check if any post processor have been defined
-        # A processor can be used for example to assigne a default value
-        for name in self._attributes + self._relationships:
-            if hasattr(self, f"process_{name}"):
-                await getattr(self, f"process_{name}")(db=db)
+        return errors

     async def _process_macros(self, db: InfrahubDatabase) -> None:
         schema_branch = db.schema.get_schema_branch(self._branch.name)

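In short, when an attribute of kind NumberPool carries NumberPoolParameters, the node now points the attribute's from_pool at the configured pool id, skips the mandatory-value check for it, and, if the referenced CoreNumberPool does not exist yet, creates it on the fly from start_range/end_range before allocating a value. The sketch below restates that allocation order in a standalone form; the wrapper function name is hypothetical, only the calls inside it come from the diff above:

# Hypothetical wrapper (assumed name) summarizing the get-or-create-then-allocate
# flow added to Node: reuse the pool from the attribute parameters, create it if
# missing, then draw the next free value for this node's attribute.
async def allocate_from_number_pool(db, node, attribute, parameters):
    try:
        pool = await registry.manager.get_one_by_id_or_default_filter(
            db=db, id=parameters.number_pool_id, kind=CoreNumberPool
        )
    except NodeNotFoundError:
        pool = await node._create_number_pool(db=db, attribute=attribute, number_pool_parameters=parameters)
    return await pool.get_resource(db=db, branch=node._branch, node=node, attribute=attribute)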
infrahub/core/node/resource_manager/ip_address_pool.py
CHANGED
@@ -81,11 +81,15 @@ class CoreIPAddressPool(Node):
         return node

     async def get_next(self, db: InfrahubDatabase, prefixlen: int | None = None) -> IPAddressType:
-        # Measure utilization of all prefixes identified as resources
         resources = await self.resources.get_peers(db=db)  # type: ignore[attr-defined]
         ip_namespace = await self.ip_namespace.get_peer(db=db)  # type: ignore[attr-defined]

-        for resource in resources.values():
+        try:
+            weighted_resources = sorted(resources.values(), key=lambda r: r.allocation_weight.value or 0, reverse=True)
+        except AttributeError:
+            weighted_resources = list(resources.values())
+
+        for resource in weighted_resources:
             ip_prefix = ipaddress.ip_network(resource.prefix.value)  # type: ignore[attr-defined]
             prefix_length = prefixlen or ip_prefix.prefixlen

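Both pool classes (this one and CoreIPPrefixPool below) now walk candidate prefixes in descending allocation_weight order, falling back to the unsorted peer list when the peers do not expose an allocation_weight attribute. A minimal, self-contained sketch of that ordering logic, with a stand-in WeightedResource type in place of the real peer nodes returned by resources.get_peers():

# Sketch of the weighting logic added to both resource pools; WeightedResource
# is an illustrative stand-in, only the sorted()/fallback expression mirrors the diff.
from dataclasses import dataclass

@dataclass
class _Weight:
    value: int | None

@dataclass
class WeightedResource:
    name: str
    allocation_weight: _Weight

resources = {
    "a": WeightedResource("low", _Weight(1)),
    "b": WeightedResource("unset", _Weight(None)),
    "c": WeightedResource("high", _Weight(10)),
}

try:
    ordered = sorted(resources.values(), key=lambda r: r.allocation_weight.value or 0, reverse=True)
except AttributeError:
    # Peers without an allocation_weight attribute keep their original order.
    ordered = list(resources.values())

print([r.name for r in ordered])  # ['high', 'low', 'unset']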
infrahub/core/node/resource_manager/ip_prefix_pool.py
CHANGED
@@ -88,11 +88,15 @@ class CoreIPPrefixPool(Node):
         return node

     async def get_next(self, db: InfrahubDatabase, prefixlen: int) -> IPNetworkType:
-        # Measure utilization of all prefixes identified as resources
         resources = await self.resources.get_peers(db=db)  # type: ignore[attr-defined]
         ip_namespace = await self.ip_namespace.get_peer(db=db)  # type: ignore[attr-defined]

-        for resource in resources.values():
+        try:
+            weighted_resources = sorted(resources.values(), key=lambda r: r.allocation_weight.value or 0, reverse=True)
+        except AttributeError:
+            weighted_resources = list(resources.values())
+
+        for resource in weighted_resources:
             subnets = await get_subnets(
                 db=db,
                 ip_prefix=ipaddress.ip_network(resource.prefix.value),  # type: ignore[attr-defined]