infrahub-server 1.3.3__py3-none-any.whl → 1.3.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. infrahub/api/schema.py +2 -2
  2. infrahub/core/convert_object_type/conversion.py +10 -0
  3. infrahub/core/diff/enricher/hierarchy.py +7 -3
  4. infrahub/core/diff/query_parser.py +7 -3
  5. infrahub/core/graph/__init__.py +1 -1
  6. infrahub/core/migrations/graph/__init__.py +2 -0
  7. infrahub/core/migrations/graph/m034_find_orphaned_schema_fields.py +84 -0
  8. infrahub/core/migrations/schema/node_attribute_add.py +55 -2
  9. infrahub/core/migrations/shared.py +37 -9
  10. infrahub/core/node/__init__.py +41 -21
  11. infrahub/core/node/resource_manager/number_pool.py +60 -22
  12. infrahub/core/query/resource_manager.py +117 -20
  13. infrahub/core/schema/__init__.py +5 -0
  14. infrahub/core/schema/attribute_parameters.py +6 -0
  15. infrahub/core/schema/attribute_schema.py +6 -0
  16. infrahub/core/schema/manager.py +5 -11
  17. infrahub/core/schema/relationship_schema.py +6 -0
  18. infrahub/core/schema/schema_branch.py +50 -11
  19. infrahub/core/validators/node/attribute.py +15 -0
  20. infrahub/core/validators/tasks.py +12 -4
  21. infrahub/graphql/queries/resource_manager.py +4 -4
  22. infrahub/tasks/registry.py +63 -35
  23. infrahub_sdk/client.py +7 -8
  24. infrahub_sdk/ctl/utils.py +3 -0
  25. infrahub_sdk/node/node.py +6 -6
  26. infrahub_sdk/node/relationship.py +43 -2
  27. infrahub_sdk/yaml.py +13 -7
  28. infrahub_server-1.3.4.dist-info/LICENSE.txt +201 -0
  29. {infrahub_server-1.3.3.dist-info → infrahub_server-1.3.4.dist-info}/METADATA +3 -3
  30. {infrahub_server-1.3.3.dist-info → infrahub_server-1.3.4.dist-info}/RECORD +32 -31
  31. infrahub_server-1.3.3.dist-info/LICENSE.txt +0 -661
  32. {infrahub_server-1.3.3.dist-info → infrahub_server-1.3.4.dist-info}/WHEEL +0 -0
  33. {infrahub_server-1.3.3.dist-info → infrahub_server-1.3.4.dist-info}/entry_points.txt +0 -0
infrahub/core/query/resource_manager.py
@@ -1,6 +1,8 @@
 from __future__ import annotations
 
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING, Any, Generator
+
+from pydantic import BaseModel, ConfigDict
 
 from infrahub.core import registry
 from infrahub.core.constants import InfrahubKind, RelationshipStatus
@@ -11,6 +13,13 @@ if TYPE_CHECKING:
     from infrahub.database import InfrahubDatabase
 
 
+class NumberPoolIdentifierData(BaseModel):
+    model_config = ConfigDict(frozen=True)
+
+    value: int
+    identifier: str
+
+
 class IPAddressPoolGetIdentifiers(Query):
     name = "ipaddresspool_get_identifiers"
     type = QueryType.READ
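
The new NumberPoolIdentifierData model is frozen, so the rows returned by the reservation queries behave as immutable value objects. A minimal standalone sketch of that behaviour (plain pydantic v2, nothing Infrahub-specific):

from pydantic import BaseModel, ConfigDict

class NumberPoolIdentifierData(BaseModel):
    model_config = ConfigDict(frozen=True)

    value: int
    identifier: str

# model_construct skips validation, matching how the queries below build rows
row = NumberPoolIdentifierData.model_construct(value=42, identifier="node-uuid-1")
try:
    row.value = 43
except Exception as exc:  # pydantic raises a ValidationError because the model is frozen
    print(type(exc).__name__)  # ValidationError
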
@@ -158,7 +167,7 @@ class NumberPoolGetReserved(Query):
     def __init__(
         self,
         pool_id: str,
-        identifier: str,
+        identifier: str | None = None,
         **kwargs: dict[str, Any],
     ) -> None:
         self.pool_id = pool_id
@@ -176,26 +185,104 @@ class NumberPoolGetReserved(Query):
 
         self.params.update(branch_params)
 
+        # If identifier is not provided, we return all reservations for the pool
+        identifier_filter = ""
+        if self.identifier:
+            identifier_filter = "r.identifier = $identifier AND "
+            self.params["identifier"] = self.identifier
+
         query = """
         MATCH (pool:%(number_pool)s { uuid: $pool_id })-[r:IS_RESERVED]->(reservation:AttributeValue)
         WHERE
-            r.identifier = $identifier
-            AND
+            %(identifier_filter)s
             %(branch_filter)s
-        """ % {"branch_filter": branch_filter, "number_pool": InfrahubKind.NUMBERPOOL}
+        """ % {
+            "branch_filter": branch_filter,
+            "number_pool": InfrahubKind.NUMBERPOOL,
+            "identifier_filter": identifier_filter,
+        }
         self.add_to_query(query)
-        self.return_labels = ["reservation.value"]
+        self.return_labels = ["reservation.value AS value", "r.identifier AS identifier"]
 
     def get_reservation(self) -> int | None:
         result = self.get_result()
         if result:
-            return result.get_as_optional_type("reservation.value", return_type=int)
+            return result.get_as_optional_type("value", return_type=int)
         return None
 
+    def get_reservations(self) -> Generator[NumberPoolIdentifierData]:
+        for result in self.results:
+            yield NumberPoolIdentifierData.model_construct(
+                value=result.get_as_type("value", return_type=int),
+                identifier=result.get_as_type("identifier", return_type=str),
+            )
+
+
+class PoolChangeReserved(Query):
+    """Change the identifier on all pools.
+    This is useful when a node is being converted to a different type and its ID has changed
+    """
+
+    name = "pool_change_reserved"
+    type = QueryType.WRITE
+
+    def __init__(
+        self,
+        existing_identifier: str,
+        new_identifier: str,
+        **kwargs: dict[str, Any],
+    ) -> None:
+        self.existing_identifier = existing_identifier
+        self.new_identifier = new_identifier
+
+        super().__init__(**kwargs)  # type: ignore[arg-type]
+
+    async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:  # noqa: ARG002
+        self.params["new_identifier"] = self.new_identifier
+        self.params["existing_identifier"] = self.existing_identifier
+        self.params["at"] = self.at.to_string()
+
+        branch_filter, branch_params = self.branch.get_query_filter_path(
+            at=self.at.to_string(), branch_agnostic=self.branch_agnostic
+        )
+
+        self.params.update(branch_params)
+
+        global_branch = registry.get_global_branch()
+        self.params["rel_prop"] = {
+            "branch": global_branch.name,
+            "branch_level": global_branch.hierarchy_level,
+            "status": RelationshipStatus.ACTIVE.value,
+            "from": self.at.to_string(),
+            "identifier": self.new_identifier,
+        }
+
+        query = """
+        MATCH (pool:Node)-[r:IS_RESERVED]->(resource)
+        WHERE
+            r.identifier = $existing_identifier
+            AND
+            %(branch_filter)s
+        SET r.to = $at
+        CREATE (pool)-[new_rel:IS_RESERVED $rel_prop]->(resource)
+        """ % {"branch_filter": branch_filter}
+        self.add_to_query(query)
+        self.return_labels = ["pool.uuid AS pool_id", "r", "new_rel"]
+
+
+"""
+Important!: The relationship IS_RESERVED for Number is not being cleaned up when the node or the branch is deleted
+I think this is something we should address in the future.
+It works for now because the query has been updated to match the identifier in IS_RESERVED with the UUID of the related node
+But in the future, if we need to use an identifier that is not the UUID, we will need to clean up the relationships
+This will be especially important as we want to support upsert with NumberPool
+"""
+
 
 class NumberPoolGetUsed(Query):
     name = "number_pool_get_used"
     type = QueryType.READ
+    return_model = NumberPoolIdentifierData
 
     def __init__(
         self,
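
The identifier condition above is now optional and is spliced into the Cypher template with %-formatting. A standalone sketch of that mechanism (the branch_filter string here is a placeholder, not the real filter Infrahub generates):

def build_where(identifier: str | None, branch_filter: str = "r.branch IN $branch_names") -> tuple[str, dict]:
    params: dict = {}
    identifier_filter = ""
    if identifier:
        identifier_filter = "r.identifier = $identifier AND "
        params["identifier"] = identifier
    where = "WHERE %(identifier_filter)s%(branch_filter)s" % {
        "identifier_filter": identifier_filter,
        "branch_filter": branch_filter,
    }
    return where, params

print(build_where(None)[0])       # WHERE r.branch IN $branch_names
print(build_where("abc-123")[0])  # WHERE r.identifier = $identifier AND r.branch IN $branch_names
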
@@ -219,26 +306,36 @@ class NumberPoolGetUsed(Query):
         self.params["attribute_name"] = self.pool.node_attribute.value
 
         query = """
-        MATCH (pool:%(number_pool)s { uuid: $pool_id })
-        CALL (pool) {
-            MATCH (pool)-[res:IS_RESERVED]->(av:AttributeValue)<-[hv:HAS_VALUE]-(attr:Attribute)
+        MATCH (pool:%(number_pool)s { uuid: $pool_id })-[res:IS_RESERVED]->(av:AttributeValue)
+        WHERE toInteger(av.value) >= $start_range and toInteger(av.value) <= $end_range
+        CALL (pool, res, av) {
+            MATCH (pool)-[res]->(av)<-[hv:HAS_VALUE]-(attr:Attribute)<-[ha:HAS_ATTRIBUTE]-(n:%(node)s)
             WHERE
-                attr.name = $attribute_name
-                AND
-                toInteger(av.value) >= $start_range and toInteger(av.value) <= $end_range
-                AND
-                all(r in [res, hv] WHERE (%(branch_filter)s))
-            RETURN av, (res.status = "active" AND hv.status = "active") AS is_active
+                n.uuid = res.identifier AND
+                attr.name = $attribute_name AND
+                all(r in [res, hv, ha] WHERE (%(branch_filter)s))
+            ORDER BY res.branch_level DESC, hv.branch_level DESC, ha.branch_level DESC, res.from DESC, hv.from DESC, ha.from DESC
+            RETURN (res.status = "active" AND hv.status = "active" AND ha.status = "active") AS is_active
+            LIMIT 1
         }
-        WITH av, is_active
-        WHERE is_active = TRUE
+        WITH av, res, is_active
+        WHERE is_active = True
         """ % {
             "branch_filter": branch_filter,
             "number_pool": InfrahubKind.NUMBERPOOL,
+            "node": self.pool.node.value,
         }
+
         self.add_to_query(query)
-        self.return_labels = ["av.value"]
-        self.order_by = ["av.value"]
+        self.return_labels = ["DISTINCT(av.value) as value", "res.identifier as identifier"]
+        self.order_by = ["value"]
+
+    def iter_results(self) -> Generator[NumberPoolIdentifierData]:
+        for result in self.results:
+            yield self.return_model.model_construct(
+                value=result.get_as_type("value", return_type=int),
+                identifier=result.get_as_type("identifier", return_type=str),
+            )
 
 
 class NumberPoolSetReserved(Query):
infrahub/core/schema/__init__.py
@@ -46,6 +46,7 @@ class SchemaExtension(HashableModel):
 
 class SchemaRoot(BaseModel):
     model_config = ConfigDict(extra="forbid")
+
     version: str | None = Field(default=None)
     generics: list[GenericSchema] = Field(default_factory=list)
     nodes: list[NodeSchema] = Field(default_factory=list)
@@ -93,6 +94,10 @@
         """Return a new `SchemaRoot` after merging `self` with `schema`."""
         return SchemaRoot.model_validate(deep_merge_dict(dicta=self.model_dump(), dictb=schema.model_dump()))
 
+    def duplicate(self) -> SchemaRoot:
+        """Return a duplicate of the current schema."""
+        return SchemaRoot.model_validate(self.model_dump())
+
 
 internal_schema = internal.to_dict()
 
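
SchemaRoot.duplicate() relies on pydantic's model_dump()/model_validate() round-trip, which rebuilds every nested model and therefore returns an independent copy. A minimal sketch with stand-in models (the class names here are illustrative, not Infrahub's):

from pydantic import BaseModel, Field

class NodeModel(BaseModel):
    name: str

class Root(BaseModel):
    nodes: list[NodeModel] = Field(default_factory=list)

    def duplicate(self) -> "Root":
        return Root.model_validate(self.model_dump())

original = Root(nodes=[NodeModel(name="device")])
copy = original.duplicate()
copy.nodes[0].name = "interface"
print(original.nodes[0].name)  # "device" -- the dump/validate round-trip yields an independent copy
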
infrahub/core/schema/attribute_parameters.py
@@ -165,3 +165,9 @@ class NumberPoolParameters(AttributeParameters):
         if self.start_range > self.end_range:
             raise ValueError("`start_range` can't be less than `end_range`")
         return self
+
+    def get_pool_size(self) -> int:
+        """
+        Returns the size of the pool based on the defined ranges.
+        """
+        return self.end_range - self.start_range + 1
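
The ranges are inclusive on both ends, hence the +1 in get_pool_size(). For example:

start_range, end_range = 100, 199
pool_size = end_range - start_range + 1
print(pool_size)  # 100 -- both 100 and 199 are usable values
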
infrahub/core/schema/attribute_schema.py
@@ -10,6 +10,7 @@ from infrahub import config
 from infrahub.core.constants.schema import UpdateSupport
 from infrahub.core.enums import generate_python_enum
 from infrahub.core.query.attribute import default_attribute_query_filter
+from infrahub.exceptions import InitializationError
 from infrahub.types import ATTRIBUTE_KIND_LABELS, ATTRIBUTE_TYPES
 
 from .attribute_parameters import (
@@ -67,6 +68,11 @@ class AttributeSchema(GeneratedAttributeSchema):
     def is_deprecated(self) -> bool:
         return bool(self.deprecation)
 
+    def get_id(self) -> str:
+        if self.id is None:
+            raise InitializationError("The attribute schema has not been saved yet and doesn't have an id")
+        return self.id
+
     def to_dict(self) -> dict:
         data = self.model_dump(exclude_unset=True, exclude_none=True)
         for field_name, value in data.items():
infrahub/core/schema/manager.py
@@ -535,7 +535,7 @@ class SchemaManager(NodeManager):
         """Delete the node with its attributes and relationships."""
         branch = await registry.get_branch(branch=branch, db=db)
 
-        obj = await self.get_one(id=node.get_id(), branch=branch, db=db)
+        obj = await self.get_one(id=node.get_id(), branch=branch, db=db, prefetch_relationships=True)
         if not obj:
            raise SchemaNotFoundError(
                branch_name=branch.name,
@@ -544,16 +544,10 @@
             )
 
         # First delete the attributes and the relationships
-        items = await self.get_many(
-            ids=[item.id for item in node.local_attributes + node.local_relationships if item.id],
-            db=db,
-            branch=branch,
-            include_owner=True,
-            include_source=True,
-        )
-
-        for item in items.values():
-            await item.delete(db=db)
+        for attr_schema_node in (await obj.attributes.get_peers(db=db)).values():
+            await attr_schema_node.delete(db=db)
+        for rel_schema_node in (await obj.relationships.get_peers(db=db)).values():
+            await rel_schema_node.delete(db=db)
 
         await obj.delete(db=db)
 
infrahub/core/schema/relationship_schema.py
@@ -9,6 +9,7 @@ from infrahub import config
 from infrahub.core.constants import RelationshipDirection
 from infrahub.core.query import QueryNode, QueryRel, QueryRelDirection
 from infrahub.core.relationship import Relationship
+from infrahub.exceptions import InitializationError
 
 from .generated.relationship_schema import GeneratedRelationshipSchema
 
@@ -57,6 +58,11 @@ class RelationshipSchema(GeneratedRelationshipSchema):
             raise ValueError("RelationshipSchema is not initialized")
         return self.identifier
 
+    def get_id(self) -> str:
+        if not self.id:
+            raise InitializationError("The relationship schema has not been saved yet and doesn't have an id")
+        return self.id
+
     def get_query_arrows(self) -> QueryArrows:
         """Return (in 4 parts) the 2 arrows for the relationship R1 and R2 based on the direction of the relationship."""
 
infrahub/core/schema/schema_branch.py
@@ -1588,7 +1588,8 @@ class SchemaBranch:
 
             self.set(name=name, schema=node)
 
-    def generate_weight(self) -> None:
+    def _generate_weight_generics(self) -> None:
+        """Generate order_weight for all generic schemas."""
         for name in self.generic_names:
             node = self.get(name=name, duplicate=False)
 
@@ -1606,6 +1607,8 @@
 
             self.set(name=name, schema=node)
 
+    def _generate_weight_nodes_profiles(self) -> None:
+        """Generate order_weight for all nodes and profiles."""
         for name in self.node_names + self.profile_names:
             node = self.get(name=name, duplicate=False)
 
@@ -1630,6 +1633,33 @@
 
             self.set(name=name, schema=node)
 
+    def _generate_weight_templates(self) -> None:
+        """Generate order_weight for all templates.
+
+        The order of the fields for the template must respect the order of the node.
+        """
+        for name in self.template_names:
+            template = self.get(name=name, duplicate=True)
+            node = self.get(name=template.name, duplicate=False)
+
+            node_weights = {
+                item.name: item.order_weight
+                for item in node.attributes + node.relationships
+                if item.order_weight is not None
+            }
+
+            for item in template.attributes + template.relationships:
+                if item.order_weight:
+                    continue
+                item.order_weight = node_weights[item.name] + 10000 if item.name in node_weights else None
+
+            self.set(name=name, schema=template)
+
+    def generate_weight(self) -> None:
+        self._generate_weight_generics()
+        self._generate_weight_nodes_profiles()
+        self._generate_weight_templates()
+
     def cleanup_inherited_elements(self) -> None:
         for name in self.node_names:
             node = self.get_node(name=name, duplicate=False)
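
Template fields inherit their ordering from the node, shifted by a fixed offset of 10000 so they sort after the node's own fields. A standalone sketch of that logic with hypothetical field names:

node_weights = {"name": 1000, "description": 2000}

template_fields = [
    {"name": "name", "order_weight": None},
    {"name": "description", "order_weight": None},
    {"name": "template_only", "order_weight": 500},  # already set, left untouched
]

for field in template_fields:
    if field["order_weight"]:
        continue
    field["order_weight"] = node_weights[field["name"]] + 10000 if field["name"] in node_weights else None

print(template_fields)
# [{'name': 'name', 'order_weight': 11000},
#  {'name': 'description', 'order_weight': 12000},
#  {'name': 'template_only', 'order_weight': 500}]
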
@@ -2038,25 +2068,34 @@
                 if relationship.kind not in [RelationshipKind.ATTRIBUTE, RelationshipKind.GENERIC]
                 else relationship.peer
             )
+
+            is_optional = (
+                relationship.optional if is_autogenerated_subtemplate else relationship.kind != RelationshipKind.PARENT
+            )
+            identifier = (
+                f"template_{relationship.identifier}"
+                if relationship.identifier
+                else self._generate_identifier_string(template_schema.kind, rel_template_peer)
+            )
+            label = (
+                f"{relationship.name} template".title()
+                if relationship.kind in [RelationshipKind.COMPONENT, RelationshipKind.PARENT]
+                else relationship.name.title()
+            )
+
             template_schema.relationships.append(
                 RelationshipSchema(
                     name=relationship.name,
                     peer=rel_template_peer,
                     kind=relationship.kind,
-                    optional=relationship.optional
-                    if is_autogenerated_subtemplate
-                    else relationship.kind != RelationshipKind.PARENT,
+                    optional=is_optional,
                     cardinality=relationship.cardinality,
                     direction=relationship.direction,
                     branch=relationship.branch,
-                    identifier=f"template_{relationship.identifier}"
-                    if relationship.identifier
-                    else self._generate_identifier_string(template_schema.kind, rel_template_peer),
+                    identifier=identifier,
                     min_count=relationship.min_count,
                     max_count=relationship.max_count,
-                    label=f"{relationship.name} template".title()
-                    if relationship.kind in [RelationshipKind.COMPONENT, RelationshipKind.PARENT]
-                    else relationship.name.title(),
+                    label=label,
                     inherited=relationship.inherited,
                 )
             )
@@ -2144,7 +2183,7 @@
             attr_schema_class = get_attribute_schema_class_for_kind(kind=node_attr.kind)
             attr = attr_schema_class(
                 optional=node_attr.optional if is_autogenerated_subtemplate else True,
-                **node_attr.model_dump(exclude=["id", "unique", "optional", "read_only"]),
+                **node_attr.model_dump(exclude=["id", "unique", "optional", "read_only", "order_weight"]),
             )
             template.attributes.append(attr)
 
infrahub/core/validators/node/attribute.py
@@ -2,6 +2,9 @@ from __future__ import annotations
 
 from typing import TYPE_CHECKING
 
+from infrahub.core import registry
+from infrahub.core.schema.attribute_parameters import NumberPoolParameters
+
 from ..interface import ConstraintCheckerInterface
 from ..query import NodeNotPresentValidatorQuery
 
@@ -31,10 +34,22 @@ class NodeAttributeAddChecker(ConstraintCheckerInterface):
         grouped_data_paths_list: list[GroupedDataPaths] = []
         if not request.schema_path.field_name:
             raise ValueError("field_name is not defined")
+
         attribute_schema = request.node_schema.get_attribute(name=request.schema_path.field_name)
         if attribute_schema.optional is True or attribute_schema.default_value is not None:
             return grouped_data_paths_list
 
+        # If the attribute is a NumberPool, we need to ensure that the pool is big enough for all existing nodes
+        if attribute_schema.kind == "NumberPool" and isinstance(attribute_schema.parameters, NumberPoolParameters):
+            nbr_nodes = await registry.manager.count(db=self.db, branch=self.branch, schema=request.node_schema)
+            pool_size = attribute_schema.parameters.get_pool_size()
+
+            if pool_size < nbr_nodes:
+                raise ValueError(
+                    f"The size of the NumberPool is smaller than the number of existing nodes {pool_size} < {nbr_nodes}."
+                )
+            return grouped_data_paths_list
+
         for query_class in self.query_classes:
             # TODO add exception handling
             query = await query_class.init(
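
The new guard compares the pool capacity against the number of nodes that would need a value. With concrete, made-up numbers:

pool_size = 50    # end_range - start_range + 1
nbr_nodes = 120   # nodes of this kind that already exist on the branch

if pool_size < nbr_nodes:
    # the checker above raises ValueError in this case, so the schema change is rejected
    print(f"The size of the NumberPool is smaller than the number of existing nodes {pool_size} < {nbr_nodes}.")
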
infrahub/core/validators/tasks.py
@@ -11,9 +11,7 @@ from infrahub.core.branch import Branch  # noqa: TC001
 from infrahub.core.path import SchemaPath  # noqa: TC001
 from infrahub.core.schema import GenericSchema, NodeSchema
 from infrahub.core.validators.aggregated_checker import AggregatedConstraintChecker
-from infrahub.core.validators.model import (
-    SchemaConstraintValidatorRequest,
-)
+from infrahub.core.validators.model import SchemaConstraintValidatorRequest, SchemaViolation
 from infrahub.dependencies.registry import get_component_registry
 from infrahub.services import InfrahubServices  # noqa: TC001 needed for prefect flow
 from infrahub.workflows.utils import add_tags
@@ -84,7 +82,17 @@ async def schema_path_validate(
     aggregated_constraint_checker = await component_registry.get_component(
         AggregatedConstraintChecker, db=db, branch=branch
     )
-    violations = await aggregated_constraint_checker.run_constraints(constraint_request)
+    try:
+        violations = await aggregated_constraint_checker.run_constraints(constraint_request)
+    except Exception as exc:
+        violation = SchemaViolation(
+            node_id="unknown",
+            node_kind=node_schema.kind,
+            display_label=f"Error validating {constraint_name} on {node_schema.kind}",
+            full_display_label=f"Error validating {constraint_name} on {node_schema.kind}",
+            message=str(exc),
+        )
+        violations = [violation]
 
     return SchemaValidatorPathResponseData(
         violations=violations, constraint_name=constraint_name, schema_path=schema_path
infrahub/graphql/queries/resource_manager.py
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 from typing import TYPE_CHECKING, Any
 
-from graphene import Field, Float, Int, List, NonNull, ObjectType, String
+from graphene import BigInt, Field, Float, Int, List, NonNull, ObjectType, String
 from infrahub_sdk.utils import extract_fields_first_node
 
 from infrahub.core import registry
@@ -33,7 +33,7 @@ class IPPoolUtilizationResource(ObjectType):
     id = Field(String, required=True, description="The ID of the current resource")
     display_label = Field(String, required=True, description="The common name of the resource")
     kind = Field(String, required=True, description="The resource kind")
-    weight = Field(Int, required=True, description="The relative weight of this resource.")
+    weight = Field(BigInt, required=True, description="The relative weight of this resource.")
     utilization = Field(Float, required=True, description="The overall utilization of the resource.")
     utilization_branches = Field(
         Float, required=True, description="The utilization of the resource on all non default branches."
@@ -70,7 +70,7 @@ def _validate_pool_type(pool_id: str, pool: CoreNode | None = None) -> CoreNode:
 
 
 class PoolAllocated(ObjectType):
-    count = Field(Int, required=True, description="The number of allocations within the selected pool.")
+    count = Field(BigInt, required=True, description="The number of allocations within the selected pool.")
     edges = Field(List(of_type=NonNull(PoolAllocatedEdge), required=True), required=True)
 
     @staticmethod
@@ -174,7 +174,7 @@ class PoolAllocated(ObjectType):
 
 
 class PoolUtilization(ObjectType):
-    count = Field(Int, required=True, description="The number of resources within the selected pool.")
+    count = Field(BigInt, required=True, description="The number of resources within the selected pool.")
     utilization = Field(Float, required=True, description="The overall utilization of the pool.")
     utilization_branches = Field(Float, required=True, description="The utilization in all non default branches.")
     utilization_default_branch = Field(
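
GraphQL's built-in Int scalar is a signed 32-bit integer, so counts and weights derived from large number pools can exceed it; presumably that is why these fields move to graphene's BigInt. A quick check of the boundary:

GRAPHQL_INT_MAX = 2**31 - 1         # 2147483647, the ceiling of the standard GraphQL Int
pool_size = 4_000_000_000           # e.g. a NumberPool spanning a 32-bit identifier space
print(pool_size > GRAPHQL_INT_MAX)  # True -- such a count no longer fits in Int
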
infrahub/tasks/registry.py
@@ -1,17 +1,77 @@
+from __future__ import annotations
+
 from typing import TYPE_CHECKING
 
 from infrahub import lock
 from infrahub.core import registry
-from infrahub.database import InfrahubDatabase
 from infrahub.log import get_logger
 from infrahub.worker import WORKER_IDENTITY
 
 if TYPE_CHECKING:
     from infrahub.core.branch import Branch
+    from infrahub.core.schema.schema_branch import SchemaBranch
+    from infrahub.database import InfrahubDatabase
 
 log = get_logger()
 
 
+def update_graphql_schema(branch: Branch, schema_branch: SchemaBranch) -> None:
+    """
+    Update the GraphQL schema for the given branch.
+    """
+    from infrahub.graphql.manager import GraphQLSchemaManager
+
+    gqlm = GraphQLSchemaManager.get_manager_for_branch(branch=branch, schema_branch=schema_branch)
+    gqlm.get_graphql_schema(
+        include_query=True,
+        include_mutation=True,
+        include_subscription=True,
+        include_types=True,
+    )
+
+
+async def create_branch_registry(db: InfrahubDatabase, branch: Branch) -> None:
+    """Create a new entry in the registry for a given branch."""
+
+    log.info("New branch detected, pulling schema", branch=branch.name, worker=WORKER_IDENTITY)
+    await registry.schema.load_schema(db=db, branch=branch)
+    registry.branch[branch.name] = branch
+    schema_branch = registry.schema.get_schema_branch(name=branch.name)
+    update_graphql_schema(branch=branch, schema_branch=schema_branch)
+
+
+async def update_branch_registry(db: InfrahubDatabase, branch: Branch) -> None:
+    """Update the registry for a branch if the schema hash has changed."""
+
+    existing_branch: Branch = registry.branch[branch.name]
+
+    if not existing_branch.schema_hash:
+        log.warning("Branch schema hash is not set, cannot update branch registry")
+        return
+
+    if existing_branch.schema_hash and existing_branch.schema_hash.main == branch.active_schema_hash.main:
+        log.debug(
+            "Branch schema hash is the same, no need to update branch registry",
+            branch=branch.name,
+            hash=existing_branch.schema_hash.main,
+            worker=WORKER_IDENTITY,
+        )
+        return
+
+    log.info(
+        "New hash detected",
+        branch=branch.name,
+        hash_current=existing_branch.schema_hash.main,
+        hash_new=branch.active_schema_hash.main,
+        worker=WORKER_IDENTITY,
+    )
+    await registry.schema.load_schema(db=db, branch=branch)
+    registry.branch[branch.name] = branch
+    schema_branch = registry.schema.get_schema_branch(name=branch.name)
+
+    update_graphql_schema(branch=branch, schema_branch=schema_branch)
+
+
 async def refresh_branches(db: InfrahubDatabase) -> None:
     """Pull all the branches from the database and update the registry.
 
@@ -24,41 +84,9 @@ async def refresh_branches(db: InfrahubDatabase) -> None:
         branches = await registry.branch_object.get_list(db=db)
         for new_branch in branches:
             if new_branch.name in registry.branch:
-                branch_registry: Branch = registry.branch[new_branch.name]
-                if (
-                    branch_registry.schema_hash
-                    and branch_registry.schema_hash.main != new_branch.active_schema_hash.main
-                ):
-                    log.info(
-                        "New hash detected",
-                        branch=new_branch.name,
-                        hash_current=branch_registry.schema_hash.main,
-                        hash_new=new_branch.active_schema_hash.main,
-                        worker=WORKER_IDENTITY,
-                    )
-                    await registry.schema.load_schema(db=db, branch=new_branch)
-                    registry.branch[new_branch.name] = new_branch
-                    schema_branch = registry.schema.get_schema_branch(name=new_branch.name)
-                    gqlm = GraphQLSchemaManager.get_manager_for_branch(branch=new_branch, schema_branch=schema_branch)
-                    gqlm.get_graphql_schema(
-                        include_query=True,
-                        include_mutation=True,
-                        include_subscription=True,
-                        include_types=True,
-                    )
-
+                await update_branch_registry(db=db, branch=new_branch)
             else:
-                log.info("New branch detected, pulling schema", branch=new_branch.name, worker=WORKER_IDENTITY)
-                await registry.schema.load_schema(db=db, branch=new_branch)
-                registry.branch[new_branch.name] = new_branch
-                schema_branch = registry.schema.get_schema_branch(name=new_branch.name)
-                gqlm = GraphQLSchemaManager.get_manager_for_branch(branch=new_branch, schema_branch=schema_branch)
-                gqlm.get_graphql_schema(
-                    include_query=True,
-                    include_mutation=True,
-                    include_subscription=True,
-                    include_types=True,
-                )
+                await create_branch_registry(db=db, branch=new_branch)
 
         purged_branches = await registry.purge_inactive_branches(db=db, active_branches=branches)
         purged_branches.update(
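
The refactored helpers boil down to a simple decision per branch: unknown branches are loaded from scratch, known branches are reloaded only when the schema hash changed. A standalone sketch of that decision (names and hashes are illustrative, not Infrahub's actual registry API):

registry_hashes = {"main": "abc123"}  # schema hashes currently loaded by this worker

def needs_reload(branch_name: str, active_hash: str) -> bool:
    if branch_name not in registry_hashes:
        return True  # unknown branch -> create_branch_registry path
    return registry_hashes[branch_name] != active_hash  # changed hash -> update_branch_registry path

print(needs_reload("main", "abc123"))       # False: schema is already current
print(needs_reload("main", "def456"))       # True: schema must be reloaded
print(needs_reload("feature-x", "aaa111"))  # True: branch not yet in the registry
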