infrahub-server 1.3.0a0__py3-none-any.whl → 1.3.0b2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123)
  1. infrahub/actions/tasks.py +4 -11
  2. infrahub/branch/__init__.py +0 -0
  3. infrahub/branch/tasks.py +29 -0
  4. infrahub/branch/triggers.py +22 -0
  5. infrahub/cli/db.py +2 -2
  6. infrahub/computed_attribute/gather.py +3 -1
  7. infrahub/computed_attribute/tasks.py +23 -29
  8. infrahub/core/attribute.py +3 -3
  9. infrahub/core/constants/__init__.py +10 -0
  10. infrahub/core/constants/database.py +1 -0
  11. infrahub/core/constants/infrahubkind.py +2 -0
  12. infrahub/core/convert_object_type/conversion.py +1 -1
  13. infrahub/core/diff/query/save.py +67 -40
  14. infrahub/core/diff/query/time_range_query.py +0 -1
  15. infrahub/core/graph/__init__.py +1 -1
  16. infrahub/core/migrations/graph/__init__.py +6 -0
  17. infrahub/core/migrations/graph/m013_convert_git_password_credential.py +0 -2
  18. infrahub/core/migrations/graph/m029_duplicates_cleanup.py +662 -0
  19. infrahub/core/migrations/graph/m030_illegal_edges.py +82 -0
  20. infrahub/core/migrations/query/attribute_add.py +13 -9
  21. infrahub/core/migrations/query/attribute_rename.py +2 -4
  22. infrahub/core/migrations/query/delete_element_in_schema.py +16 -11
  23. infrahub/core/migrations/query/node_duplicate.py +16 -15
  24. infrahub/core/migrations/query/relationship_duplicate.py +16 -12
  25. infrahub/core/migrations/schema/node_attribute_remove.py +1 -2
  26. infrahub/core/migrations/schema/node_remove.py +16 -14
  27. infrahub/core/node/__init__.py +74 -14
  28. infrahub/core/node/base.py +1 -1
  29. infrahub/core/node/resource_manager/ip_address_pool.py +6 -2
  30. infrahub/core/node/resource_manager/ip_prefix_pool.py +6 -2
  31. infrahub/core/node/resource_manager/number_pool.py +31 -5
  32. infrahub/core/node/standard.py +6 -1
  33. infrahub/core/path.py +1 -1
  34. infrahub/core/protocols.py +10 -0
  35. infrahub/core/query/node.py +1 -1
  36. infrahub/core/query/relationship.py +4 -6
  37. infrahub/core/query/standard_node.py +19 -5
  38. infrahub/core/relationship/constraints/peer_relatives.py +72 -0
  39. infrahub/core/relationship/model.py +1 -1
  40. infrahub/core/schema/attribute_parameters.py +129 -5
  41. infrahub/core/schema/attribute_schema.py +62 -14
  42. infrahub/core/schema/basenode_schema.py +2 -2
  43. infrahub/core/schema/definitions/core/__init__.py +16 -2
  44. infrahub/core/schema/definitions/core/group.py +45 -0
  45. infrahub/core/schema/definitions/core/resource_pool.py +29 -0
  46. infrahub/core/schema/definitions/internal.py +25 -4
  47. infrahub/core/schema/generated/attribute_schema.py +12 -5
  48. infrahub/core/schema/generated/relationship_schema.py +6 -1
  49. infrahub/core/schema/manager.py +7 -2
  50. infrahub/core/schema/schema_branch.py +69 -5
  51. infrahub/core/validators/__init__.py +8 -0
  52. infrahub/core/validators/attribute/choices.py +0 -1
  53. infrahub/core/validators/attribute/enum.py +0 -1
  54. infrahub/core/validators/attribute/kind.py +0 -1
  55. infrahub/core/validators/attribute/length.py +0 -1
  56. infrahub/core/validators/attribute/min_max.py +118 -0
  57. infrahub/core/validators/attribute/number_pool.py +106 -0
  58. infrahub/core/validators/attribute/optional.py +0 -2
  59. infrahub/core/validators/attribute/regex.py +0 -1
  60. infrahub/core/validators/enum.py +5 -0
  61. infrahub/core/validators/tasks.py +1 -1
  62. infrahub/database/__init__.py +16 -4
  63. infrahub/database/validation.py +100 -0
  64. infrahub/dependencies/builder/constraint/grouped/node_runner.py +2 -0
  65. infrahub/dependencies/builder/constraint/relationship_manager/peer_relatives.py +8 -0
  66. infrahub/dependencies/builder/diff/deserializer.py +1 -1
  67. infrahub/dependencies/registry.py +2 -0
  68. infrahub/events/models.py +1 -1
  69. infrahub/git/base.py +5 -3
  70. infrahub/git/integrator.py +102 -3
  71. infrahub/graphql/mutations/main.py +1 -1
  72. infrahub/graphql/mutations/resource_manager.py +54 -6
  73. infrahub/graphql/queries/resource_manager.py +7 -1
  74. infrahub/graphql/queries/task.py +10 -0
  75. infrahub/graphql/resolvers/many_relationship.py +1 -1
  76. infrahub/graphql/resolvers/resolver.py +2 -2
  77. infrahub/graphql/resolvers/single_relationship.py +1 -1
  78. infrahub/graphql/types/task_log.py +3 -2
  79. infrahub/menu/menu.py +8 -7
  80. infrahub/message_bus/operations/refresh/registry.py +3 -3
  81. infrahub/patch/queries/delete_duplicated_edges.py +40 -29
  82. infrahub/pools/number.py +5 -3
  83. infrahub/pools/registration.py +22 -0
  84. infrahub/pools/tasks.py +56 -0
  85. infrahub/schema/__init__.py +0 -0
  86. infrahub/schema/tasks.py +27 -0
  87. infrahub/schema/triggers.py +23 -0
  88. infrahub/task_manager/task.py +44 -4
  89. infrahub/trigger/catalogue.py +4 -0
  90. infrahub/trigger/models.py +5 -4
  91. infrahub/trigger/setup.py +26 -2
  92. infrahub/trigger/tasks.py +1 -1
  93. infrahub/types.py +6 -0
  94. infrahub/webhook/tasks.py +6 -9
  95. infrahub/workflows/catalogue.py +27 -1
  96. infrahub_sdk/client.py +43 -10
  97. infrahub_sdk/node/__init__.py +39 -0
  98. infrahub_sdk/node/attribute.py +122 -0
  99. infrahub_sdk/node/constants.py +21 -0
  100. infrahub_sdk/{node.py → node/node.py} +50 -749
  101. infrahub_sdk/node/parsers.py +15 -0
  102. infrahub_sdk/node/property.py +24 -0
  103. infrahub_sdk/node/related_node.py +266 -0
  104. infrahub_sdk/node/relationship.py +302 -0
  105. infrahub_sdk/protocols.py +112 -0
  106. infrahub_sdk/protocols_base.py +34 -2
  107. infrahub_sdk/query_groups.py +13 -2
  108. infrahub_sdk/schema/main.py +1 -0
  109. infrahub_sdk/schema/repository.py +16 -0
  110. infrahub_sdk/spec/object.py +1 -1
  111. infrahub_sdk/store.py +1 -1
  112. infrahub_sdk/testing/schemas/car_person.py +1 -0
  113. {infrahub_server-1.3.0a0.dist-info → infrahub_server-1.3.0b2.dist-info}/METADATA +3 -3
  114. {infrahub_server-1.3.0a0.dist-info → infrahub_server-1.3.0b2.dist-info}/RECORD +122 -100
  115. {infrahub_server-1.3.0a0.dist-info → infrahub_server-1.3.0b2.dist-info}/WHEEL +1 -1
  116. infrahub_testcontainers/container.py +239 -64
  117. infrahub_testcontainers/docker-compose-cluster.test.yml +321 -0
  118. infrahub_testcontainers/docker-compose.test.yml +1 -0
  119. infrahub_testcontainers/helpers.py +15 -1
  120. infrahub_testcontainers/plugin.py +9 -0
  121. infrahub/patch/queries/consolidate_duplicated_nodes.py +0 -106
  122. {infrahub_server-1.3.0a0.dist-info → infrahub_server-1.3.0b2.dist-info}/LICENSE.txt +0 -0
  123. {infrahub_server-1.3.0a0.dist-info → infrahub_server-1.3.0b2.dist-info}/entry_points.txt +0 -0
infrahub/dependencies/registry.py CHANGED
@@ -3,6 +3,7 @@ from .builder.constraint.node.grouped_uniqueness import NodeGroupedUniquenessCon
 from .builder.constraint.node.uniqueness import NodeAttributeUniquenessConstraintDependency
 from .builder.constraint.relationship_manager.count import RelationshipCountConstraintDependency
 from .builder.constraint.relationship_manager.peer_kind import RelationshipPeerKindConstraintDependency
+from .builder.constraint.relationship_manager.peer_relatives import RelationshipPeerRelativesConstraintDependency
 from .builder.constraint.relationship_manager.profiles_kind import RelationshipProfilesKindConstraintDependency
 from .builder.constraint.schema.aggregated import AggregatedSchemaConstraintsDependency
 from .builder.constraint.schema.attribute_regex import SchemaAttributeRegexConstraintDependency
@@ -37,6 +38,7 @@ def build_component_registry() -> ComponentDependencyRegistry:
     component_registry.track_dependency(RelationshipCountConstraintDependency)
     component_registry.track_dependency(RelationshipProfilesKindConstraintDependency)
     component_registry.track_dependency(RelationshipPeerKindConstraintDependency)
+    component_registry.track_dependency(RelationshipPeerRelativesConstraintDependency)
     component_registry.track_dependency(NodeConstraintRunnerDependency)
     component_registry.track_dependency(NodeDeleteValidatorDependency)
     component_registry.track_dependency(IpamKindsGetterDependency)
infrahub/events/models.py CHANGED
@@ -8,7 +8,7 @@ from pydantic import BaseModel, Field, PrivateAttr, model_validator
 
 from infrahub import __version__
 from infrahub.auth import AccountSession, AuthType
-from infrahub.context import InfrahubContext  # noqa: TC001
+from infrahub.context import InfrahubContext
 from infrahub.core.branch import Branch  # noqa: TC001
 from infrahub.message_bus import InfrahubMessage, Meta
 from infrahub.worker import WORKER_IDENTITY
infrahub/git/base.py CHANGED
@@ -162,6 +162,11 @@ class InfrahubRepositoryBase(BaseModel, ABC):
     infrahub_branch_name: str | None = Field(None, description="Infrahub branch on which to sync the remote repository")
     model_config = ConfigDict(arbitrary_types_allowed=True, ignored_types=(Flow, Task))
 
+    def get_client(self) -> InfrahubClient:
+        if self.client is None:
+            raise ValueError("Client is not set")
+        return self.client
+
     @property
     def sdk(self) -> InfrahubClient:
         if self.client:
@@ -445,9 +450,6 @@ class InfrahubRepositoryBase(BaseModel, ABC):
 
         return [Worktree.init(response) for response in responses]
 
-    def get_client(self) -> InfrahubClient:
-        return self.sdk
-
     def get_location(self) -> str:
         if self.location:
             return self.location
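Note: `get_client()` moves from a silent fallback (`return self.sdk`) to an explicit guard that raises when no client is attached. A minimal sketch of the before/after behavior (the class and names below are illustrative stand-ins, not from the package):

```python
class RepoSketch:
    """Stand-in for InfrahubRepositoryBase; only models the client attribute."""

    def __init__(self, client: object | None = None) -> None:
        self.client = client

    def get_client(self) -> object:
        # New behavior from this diff: fail loudly instead of falling back to self.sdk.
        if self.client is None:
            raise ValueError("Client is not set")
        return self.client


repo = RepoSketch(client=None)
try:
    repo.get_client()
except ValueError as exc:
    print(exc)  # -> Client is not set
```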
infrahub/git/integrator.py CHANGED
@@ -29,10 +29,12 @@ from infrahub_sdk.schema.repository import (
     InfrahubPythonTransformConfig,
     InfrahubRepositoryConfig,
 )
+from infrahub_sdk.spec.menu import MenuFile
+from infrahub_sdk.spec.object import ObjectFile
 from infrahub_sdk.template import Jinja2Template
 from infrahub_sdk.template.exceptions import JinjaTemplateError
 from infrahub_sdk.utils import compare_lists
-from infrahub_sdk.yaml import SchemaFile
+from infrahub_sdk.yaml import InfrahubFile, SchemaFile
 from prefect import flow, task
 from prefect.cache_policies import NONE
 from prefect.logging import get_run_logger
@@ -40,7 +42,7 @@ from pydantic import BaseModel, Field
 from pydantic import ValidationError as PydanticValidationError
 from typing_extensions import Self
 
-from infrahub.core.constants import ArtifactStatus, ContentType, InfrahubKind, RepositorySyncStatus
+from infrahub.core.constants import ArtifactStatus, ContentType, InfrahubKind, RepositoryObjects, RepositorySyncStatus
 from infrahub.core.registry import registry
 from infrahub.events.artifact_action import ArtifactCreatedEvent, ArtifactUpdatedEvent
 from infrahub.events.models import EventMeta
@@ -54,6 +56,7 @@ if TYPE_CHECKING:
     import types
 
     from infrahub_sdk.checks import InfrahubCheck
+    from infrahub_sdk.ctl.utils import YamlFileVar
     from infrahub_sdk.schema.repository import InfrahubRepositoryArtifactDefinitionConfig
     from infrahub_sdk.transforms import InfrahubTransform
 
@@ -159,7 +162,7 @@ class InfrahubRepositoryIntegrator(InfrahubRepositoryBase):
     async def ensure_location_is_defined(self) -> None:
         if self.location:
             return
-        client = self.get_client()
+        client = self.sdk
         repo = await client.get(
             kind=CoreGenericRepository, name__value=self.name, exclude=["tags", "credential"], raise_when_missing=True
         )
@@ -179,6 +182,7 @@ class InfrahubRepositoryIntegrator(InfrahubRepositoryBase):
 
         config_file = await self.get_repository_config(branch_name=infrahub_branch_name, commit=commit)  # type: ignore[misc]
         sync_status = RepositorySyncStatus.IN_SYNC if config_file else RepositorySyncStatus.ERROR_IMPORT
+
         error: Exception | None = None
 
         try:
@@ -189,6 +193,17 @@ class InfrahubRepositoryIntegrator(InfrahubRepositoryBase):
                 branch_name=infrahub_branch_name, commit=commit, config_file=config_file
             )  # type: ignore[misc]
 
+            await self.import_objects(
+                branch_name=infrahub_branch_name,
+                commit=commit,
+                config_file=config_file,
+            )  # type: ignore[misc]
+            await self.import_objects(
+                branch_name=infrahub_branch_name,
+                commit=commit,
+                config_file=config_file,
+            )  # type: ignore[misc]
+
             await self.import_all_python_files(  # type: ignore[call-overload]
                 branch_name=infrahub_branch_name, commit=commit, config_file=config_file
             )  # type: ignore[misc]
@@ -815,6 +830,80 @@ class InfrahubRepositoryIntegrator(InfrahubRepositoryBase):
             log.info(f"TransformPython {transform_name!r} not found locally, deleting")
             await transform_definition_in_graph[transform_name].delete()
 
+    async def _load_yamlfile_from_disk(self, paths: list[Path], file_type: type[YamlFileVar]) -> list[YamlFileVar]:
+        data_files = file_type.load_from_disk(paths=paths)
+
+        for data_file in data_files:
+            if not data_file.valid or not data_file.content:
+                raise ValueError(f"{data_file.error_message} ({data_file.location})")
+
+        return data_files
+
+    async def _load_objects(
+        self,
+        paths: list[Path],
+        branch: str,
+        file_type: type[InfrahubFile],
+    ) -> None:
+        """Load one or multiple objects files into Infrahub."""
+
+        log = get_run_logger()
+        files = await self._load_yamlfile_from_disk(paths=paths, file_type=file_type)
+
+        for file in files:
+            await file.validate_format(client=self.sdk, branch=branch)
+            schema = await self.sdk.schema.get(kind=file.spec.kind, branch=branch)
+            if not schema.human_friendly_id and not schema.default_filter:
+                raise ValueError(
+                    f"Schemas of objects or menus defined within {file.location} "
+                    "should have a `human_friendly_id` defined to avoid creating duplicated objects."
+                )
+
+        for file in files:
+            log.info(f"Loading objects defined in {file.location}")
+            await file.process(client=self.sdk, branch=branch)
+
+    async def _import_file_paths(
+        self, branch_name: str, commit: str, files_pathes: list[Path], object_type: RepositoryObjects
+    ) -> None:
+        branch_wt = self.get_worktree(identifier=commit or branch_name)
+        file_pathes = [branch_wt.directory / file_path for file_path in files_pathes]
+
+        # We currently assume there can't be concurrent imports, but if so, we might need to clone the client before tracking here.
+        async with self.sdk.start_tracking(
+            identifier=f"group-repo-{object_type.value}-{self.id}",
+            delete_unused_nodes=True,
+            branch=branch_name,
+            group_type="CoreRepositoryGroup",
+            group_params={"content": object_type.value, "repository": str(self.id)},
+        ):
+            file_type = repo_object_type_to_file_type(object_type)
+            await self._load_objects(
+                paths=file_pathes,
+                branch=branch_name,
+                file_type=file_type,
+            )
+
+    @task(name="import-objects", task_run_name="Import Objects", cache_policy=NONE)  # type: ignore[arg-type]
+    async def import_objects(
+        self,
+        branch_name: str,
+        commit: str,
+        config_file: InfrahubRepositoryConfig,
+    ) -> None:
+        await self._import_file_paths(
+            branch_name=branch_name,
+            commit=commit,
+            files_pathes=config_file.objects,
+            object_type=RepositoryObjects.OBJECT,
+        )
+        await self._import_file_paths(
+            branch_name=branch_name,
+            commit=commit,
+            files_pathes=config_file.menus,
+            object_type=RepositoryObjects.MENU,
+        )
+
     @task(name="check-definition-get", task_run_name="Get Check Definition", cache_policy=NONE)  # type: ignore[arg-type]
     async def get_check_definition(
         self,
@@ -1342,3 +1431,13 @@ class InfrahubRepositoryIntegrator(InfrahubRepositoryBase):
 
         await self.service.event.send(event=event)
         return ArtifactGenerateResult(changed=True, checksum=checksum, storage_id=storage_id, artifact_id=artifact.id)
+
+
+def repo_object_type_to_file_type(repo_object: RepositoryObjects) -> type[InfrahubFile]:
+    match repo_object:
+        case RepositoryObjects.OBJECT:
+            return ObjectFile
+        case RepositoryObjects.MENU:
+            return MenuFile
+        case _:
+            raise ValueError(f"Unknown repository object type: {repo_object}")
infrahub/graphql/mutations/main.py CHANGED
@@ -382,7 +382,7 @@ class InfrahubMutationMixin:
         except HFIDViolatedError as exc:
             # Only the HFID constraint has been violated, it means the node exists and we can update without rerunning constraints
             if len(exc.matching_nodes_ids) > 1:
-                raise RuntimeError(f"Multiple {schema_name} nodes have the same hfid (database corrupted)") from exc
+                raise RuntimeError(f"Multiple {schema_name} nodes have the same hfid") from exc
             node_id = list(exc.matching_nodes_ids)[0]
             node = await NodeManager.get_one(db=db, id=node_id, kind=schema_name, branch=branch, raise_on_error=True)
             updated_obj, mutation = await cls._call_mutate_update(
infrahub/graphql/mutations/resource_manager.py CHANGED
@@ -6,15 +6,18 @@ from graphene import Boolean, Field, InputField, InputObjectType, Int, List, Mut
 from graphene.types.generic import GenericScalar
 from typing_extensions import Self
 
-from infrahub.core import registry
+from infrahub.core import protocols, registry
 from infrahub.core.constants import InfrahubKind
 from infrahub.core.ipam.constants import PrefixMemberType
+from infrahub.core.manager import NodeManager
 from infrahub.core.schema import NodeSchema
+from infrahub.core.schema.attribute_parameters import NumberAttributeParameters
 from infrahub.database import retry_db_transaction
 from infrahub.exceptions import QueryValidationError, SchemaNotFoundError, ValidationError
+from infrahub.pools.registration import get_branches_with_schema_number_pool
 
 from ..queries.resource_manager import PoolAllocatedNode
-from .main import InfrahubMutationMixin, InfrahubMutationOptions
+from .main import DeleteResult, InfrahubMutationMixin, InfrahubMutationOptions
 
 if TYPE_CHECKING:
     from graphql import GraphQLResolveInfo
@@ -177,14 +180,16 @@ class InfrahubNumberPoolMutation(InfrahubMutationMixin, Mutation):
         database: InfrahubDatabase | None = None,  # noqa: ARG003
     ) -> Any:
         try:
-            pool_node = registry.schema.get(name=data["node"].value)
-            if not pool_node.is_generic_schema and not pool_node.is_node_schema:
+            schema_node = registry.schema.get(name=data["node"].value)
+            if not schema_node.is_generic_schema and not schema_node.is_node_schema:
                 raise ValidationError(input_value="The selected model is not a Node or a Generic")
         except SchemaNotFoundError as exc:
             exc.message = "The selected model does not exist"
             raise exc
 
-        attributes = [attribute for attribute in pool_node.attributes if attribute.name == data["node_attribute"].value]
+        attributes = [
+            attribute for attribute in schema_node.attributes if attribute.name == data["node_attribute"].value
+        ]
         if not attributes:
             raise ValidationError(input_value="The selected attribute doesn't exist in the selected model")
 
@@ -192,9 +197,22 @@
         if attribute.kind != "Number":
             raise ValidationError(input_value="The selected attribute is not of the kind Number")
 
-        if data["start_range"].value > data["end_range"].value:
+        start_range = data["start_range"].value
+        end_range = data["end_range"].value
+        if start_range > end_range:
             raise ValidationError(input_value="start_range can't be larger than end_range")
 
+        if not isinstance(attribute.parameters, NumberAttributeParameters):
+            raise ValidationError(
+                input_value="The selected attribute parameters are not of the kind NumberAttributeParameters"
+            )
+
+        if attribute.parameters.min_value is not None and start_range < attribute.parameters.min_value:
+            raise ValidationError(input_value="start_range can't be less than min_value")
+
+        if attribute.parameters.max_value is not None and end_range > attribute.parameters.max_value:
+            raise ValidationError(input_value="end_range can't be larger than max_value")
+
         return await super().mutate_create(info=info, data=data, branch=branch)
 
     @classmethod
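Note: `mutate_create` now also clamps the requested pool range against the attribute's `NumberAttributeParameters`. A standalone, hedged restatement of the check (the function name is illustrative, not from the package):

```python
def validate_pool_range(
    start_range: int,
    end_range: int,
    min_value: int | None = None,
    max_value: int | None = None,
) -> None:
    """Raise if [start_range, end_range] is unordered or escapes the attribute bounds."""
    if start_range > end_range:
        raise ValueError("start_range can't be larger than end_range")
    if min_value is not None and start_range < min_value:
        raise ValueError("start_range can't be less than min_value")
    if max_value is not None and end_range > max_value:
        raise ValueError("end_range can't be larger than max_value")


validate_pool_range(10, 20, min_value=0, max_value=100)  # passes
# validate_pool_range(10, 200, max_value=100) would raise ValueError
```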
@@ -221,3 +239,33 @@ class InfrahubNumberPoolMutation(InfrahubMutationMixin, Mutation):
             raise ValidationError(input_value="start_range can't be larger than end_range")
 
         return number_pool, result
+
+    @classmethod
+    @retry_db_transaction(name="resource_manager_update")
+    async def mutate_delete(
+        cls,
+        info: GraphQLResolveInfo,
+        data: InputObjectType,
+        branch: Branch,
+    ) -> DeleteResult:
+        graphql_context: GraphqlContext = info.context
+
+        number_pool = await NodeManager.find_object(
+            db=graphql_context.db,
+            kind=protocols.CoreNumberPool,
+            id=data.get("id"),
+            hfid=data.get("hfid"),
+            branch=branch,
+        )
+
+        violating_branches = get_branches_with_schema_number_pool(
+            kind=number_pool.node.value, attribute_name=number_pool.node_attribute.value
+        )
+
+        if violating_branches:
+            raise ValidationError(
+                input_value=f"Unable to delete number pool {number_pool.node.value}.{number_pool.node_attribute.value}"
+                f" is in use (branches: {','.join(violating_branches)})"
+            )
+
+        return await super().mutate_delete(info=info, data=data, branch=branch)
infrahub/graphql/queries/resource_manager.py CHANGED
@@ -306,7 +306,13 @@ async def resolve_number_pool_allocation(
 async def resolve_number_pool_utilization(
     db: InfrahubDatabase, pool: CoreNode, at: Timestamp | str | None, branch: Branch
 ) -> dict:
-    number_pool = NumberUtilizationGetter(db=db, pool=pool, at=at, branch=branch)
+    """
+    Returns a mapping containg utilization info of a number pool.
+    The utilization is calculated as the percentage of the total number of values in the pool that are not excluded for the corresponding attribute.
+    """
+
+    core_number_pool = await registry.manager.get_one_by_id_or_default_filter(db=db, id=pool.id, kind="CoreNumberPool")
+    number_pool = NumberUtilizationGetter(db=db, pool=core_number_pool, at=at, branch=branch)
     await number_pool.load_data()
 
     return {
infrahub/graphql/queries/task.py CHANGED
@@ -32,6 +32,8 @@ class Tasks(ObjectType):
         workflow: list[str] | None = None,
         related_node__ids: list | None = None,
         q: str | None = None,
+        log_limit: int | None = None,
+        log_offset: int | None = None,
     ) -> dict[str, Any]:
         related_nodes = related_node__ids or []
         ids = ids or []
@@ -45,6 +47,8 @@
             statuses=state,
             workflows=workflow,
             related_nodes=related_nodes,
+            log_limit=log_limit,
+            log_offset=log_offset,
         )
 
     @staticmethod
@@ -71,6 +75,8 @@
         branch: str | None = None,
         limit: int | None = None,
         offset: int | None = None,
+        log_limit: int | None = None,
+        log_offset: int | None = None,
     ) -> dict[str, Any]:
         graphql_context: GraphqlContext = info.context
         fields = await extract_fields_first_node(info)
@@ -87,6 +93,8 @@
             related_nodes=related_nodes,
             limit=limit,
             offset=offset,
+            log_limit=log_limit,
+            log_offset=log_offset,
         )
         prefect_count = prefect_tasks.get("count", None)
         return {
@@ -105,6 +113,8 @@ Task = Field(
     workflow=List(String),
     ids=List(String),
     q=String(required=False),
+    log_limit=Int(required=False),
+    log_offset=Int(required=False),
     resolver=Tasks.resolve,
     required=True,
 )
infrahub/graphql/resolvers/many_relationship.py CHANGED
@@ -99,7 +99,7 @@ class ManyRelationshipResolver:
         filters = {
             f"{info.field_name}__{key}": value
             for key, value in kwargs.items()
-            if "__" in key and value or key in ["id", "ids"]
+            if ("__" in key and value) or key in ["id", "ids"]
         }
 
         response: dict[str, Any] = {"edges": [], "count": None}
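Note: the same one-character change recurs in `resolver.py` and `single_relationship.py` below. Since `and` binds tighter than `or` in Python, the added parentheses match the grouping the interpreter already used, so this is a readability fix rather than a behavior change; a quick standalone check:

```python
key, value = "name__value", ""  # a filter key with an empty value

old_style = "__" in key and value or key in ["id", "ids"]
new_style = ("__" in key and value) or key in ["id", "ids"]

# Both parse as (("__" in key) and value) or (key in ["id", "ids"]).
assert bool(old_style) == bool(new_style)
```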
infrahub/graphql/resolvers/resolver.py CHANGED
@@ -99,7 +99,7 @@ async def default_resolver(*args: Any, **kwargs) -> dict | list[dict] | None:
     filters = {
         f"{info.field_name}__{key}": value
         for key, value in kwargs.items()
-        if "__" in key and value or key in ["id", "ids"]
+        if ("__" in key and value) or key in ["id", "ids"]
     }
 
     async with graphql_context.db.start_session(read_only=True) as db:
@@ -288,7 +288,7 @@ async def hierarchy_resolver(
     filters = {
         f"{info.field_name}__{key}": value
         for key, value in kwargs.items()
-        if "__" in key and value or key in ["id", "ids"]
+        if ("__" in key and value) or key in ["id", "ids"]
     }
 
     response: dict[str, Any] = {"edges": [], "count": None}
infrahub/graphql/resolvers/single_relationship.py CHANGED
@@ -107,7 +107,7 @@ class SingleRelationshipResolver:
         filters = {
             f"{field_name}__{key}": value
             for key, value in kwargs.items()
-            if "__" in key and value or key in ["id", "ids"]
+            if ("__" in key and value) or key in ["id", "ids"]
         }
         async with db.start_session(read_only=True) as dbs:
             objs = await NodeManager.query_peers(
infrahub/graphql/types/task_log.py CHANGED
@@ -1,4 +1,4 @@
-from graphene import Field, InputObjectType, List, ObjectType, String
+from graphene import Field, InputObjectType, Int, List, NonNull, ObjectType, String
 from graphene.types.uuid import UUID
 
 from .enums import Severity
@@ -26,4 +26,5 @@ class TaskLogNodes(ObjectType):
 
 
 class TaskLogEdge(ObjectType):
-    edges = List(TaskLogNodes)
+    edges = List(NonNull(TaskLogNodes), required=True)
+    count = Int(required=True)
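Note: taken together with the `log_limit`/`log_offset` arguments added in `task.py` above, task logs can now be paginated and counted independently of the tasks themselves. A hedged usage sketch; the query shape below (field names such as `InfrahubTask`, `logs`, and `message`) is assumed from the types in this diff rather than copied from the published schema:

```python
TASKS_WITH_PAGED_LOGS = """
query {
  InfrahubTask(limit: 10, log_limit: 5, log_offset: 0) {
    count
    edges {
      node {
        id
        logs {
          count            # new required Int on TaskLogEdge
          edges { node { message severity } }
        }
      }
    }
  }
}
"""
```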
infrahub/menu/menu.py CHANGED
@@ -57,7 +57,7 @@ default_menu = [
             name="IPPrefix",
             label="IP Prefixes",
             kind=InfrahubKind.IPPREFIX,
-            path="/ipam/prefixes",
+            path="/ipam",
             icon=_extract_node_icon(infrahub_schema.get(InfrahubKind.IPPREFIX)),
             protected=True,
             section=MenuSection.INTERNAL,
@@ -68,7 +68,7 @@ default_menu = [
             name="IPAddress",
             label="IP Addresses",
             kind=InfrahubKind.IPPREFIX,
-            path="/ipam/addresses?ipam-tab=ip-details",
+            path="/ipam/ip_addresses",
             icon=_extract_node_icon(infrahub_schema.get(InfrahubKind.IPADDRESS)),
             protected=True,
             section=MenuSection.INTERNAL,
@@ -79,6 +79,7 @@ default_menu = [
             name="Namespaces",
             label="Namespaces",
             kind=InfrahubKind.IPNAMESPACE,
+            path="/ipam/namespaces",
             icon=_extract_node_icon(infrahub_schema.get(InfrahubKind.IPNAMESPACE)),
             protected=True,
             section=MenuSection.INTERNAL,
@@ -249,16 +250,16 @@ default_menu = [
     MenuItemDefinition(
         namespace="Builtin",
         name="TriggerDefinition",
-        label="Triggers",
+        label="Events",
         icon=_extract_node_icon(infrahub_schema.get(InfrahubKind.TRIGGERRULE)),
         protected=True,
         section=MenuSection.INTERNAL,
        order_weight=6000,
         children=[
             MenuItemDefinition(
-                namespace="Core",
+                namespace="Builtin",
                 name="TriggerRule",
-                label="Trigger Rules",
+                label="Rules",
                 kind=InfrahubKind.TRIGGERRULE,
                 icon=_extract_node_icon(infrahub_schema.get(InfrahubKind.TRIGGERRULE)),
                 protected=True,
@@ -266,9 +267,9 @@ default_menu = [
                 order_weight=1000,
             ),
             MenuItemDefinition(
-                namespace="Core",
+                namespace="Builtin",
                 name="Action",
-                label="Trigger Actions",
+                label="Actions",
                 kind=InfrahubKind.ACTION,
                 icon=_extract_node_icon(infrahub_schema.get(InfrahubKind.ACTION)),
                 protected=True,
infrahub/message_bus/operations/refresh/registry.py CHANGED
@@ -26,6 +26,6 @@ async def rebased_branch(message: messages.RefreshRegistryRebasedBranch, service
 
     async with lock.registry.local_schema_lock():
         service.log.info("Refreshing rebased branch")
-        registry.branch[message.branch] = await registry.branch_object.get_by_name(
-            name=message.branch, db=service.database
-        )
+
+        async with service.database.start_session(read_only=True) as db:
+            registry.branch[message.branch] = await registry.branch_object.get_by_name(name=message.branch, db=db)
infrahub/patch/queries/delete_duplicated_edges.py CHANGED
@@ -4,7 +4,7 @@ from .base import PatchQuery
 
 class DeleteDuplicatedEdgesPatchQuery(PatchQuery):
     """
-    Find duplicated or overlapping edges of the same status, type, and branch to update and delete
+    For all Node vertices, find duplicated or overlapping edges of the same status, type, and branch to update and delete
     - one edge will be kept for each pair of nodes and a given status, type, and branch. it will be
       updated to have the earliest "from" and "to" times in this group
     - all the other duplicate/overlapping edges will be deleted
@@ -17,9 +17,9 @@ class DeleteDuplicatedEdgesPatchQuery(PatchQuery):
     async def plan(self) -> PatchPlan:
         query = """
 // ------------
-// Find node pairs that have duplicate edges
+// Find vertex pairs that have duplicate edges
 // ------------
-MATCH (node_with_dup_edges:Node)-[edge]->(peer)
+MATCH (node_with_dup_edges:Node)-[edge]-(peer)
 WITH node_with_dup_edges, type(edge) AS edge_type, edge.status AS edge_status, edge.branch AS edge_branch, peer, count(*) AS num_dup_edges
 WHERE num_dup_edges > 1
 WITH DISTINCT node_with_dup_edges, edge_type, edge_branch, peer
@@ -27,12 +27,12 @@ CALL (node_with_dup_edges, edge_type, edge_branch, peer) {
     // ------------
     // Get the earliest active and deleted edges for this branch
     // ------------
-    MATCH (node_with_dup_edges)-[active_edge {branch: edge_branch, status: "active"}]->(peer)
+    OPTIONAL MATCH (node_with_dup_edges)-[active_edge {branch: edge_branch, status: "active"}]->(peer)
     WHERE type(active_edge) = edge_type
     WITH node_with_dup_edges, edge_type, edge_branch, peer, active_edge
     ORDER BY active_edge.from ASC
     WITH node_with_dup_edges, edge_type, edge_branch, peer, head(collect(active_edge.from)) AS active_from
-    OPTIONAL MATCH (node_with_dup_edges)-[deleted_edge {branch: edge_branch, status: "deleted"}]->(peer)
+    OPTIONAL MATCH (node_with_dup_edges)-[deleted_edge {branch: edge_branch, status: "deleted"}]-(peer)
     WITH node_with_dup_edges, edge_type, edge_branch, peer, active_from, deleted_edge
     ORDER BY deleted_edge.from ASC
     WITH node_with_dup_edges, edge_type, edge_branch, peer, active_from, head(collect(deleted_edge.from)) AS deleted_from
@@ -40,71 +40,82 @@ CALL (node_with_dup_edges, edge_type, edge_branch, peer) {
     // Plan one active edge update with correct from and to times
     // ------------
     CALL (node_with_dup_edges, edge_type, edge_branch, peer, active_from, deleted_from) {
-        MATCH (node_with_dup_edges)-[active_e {branch: edge_branch, status: "active"}]->(peer)
+        OPTIONAL MATCH (node_with_dup_edges)-[active_e {branch: edge_branch, status: "active"}]->(peer)
         WHERE type(active_e) = edge_type
         WITH node_with_dup_edges, edge_type, edge_branch, peer, active_from, deleted_from, active_e
         ORDER BY %(id_func_name)s(active_e)
         LIMIT 1
         WITH active_e, properties(active_e) AS before_props, {from: active_from, to: deleted_from} AS prop_updates
-        RETURN [
-            {
-                db_id: %(id_func_name)s(active_e), before_props: before_props, prop_updates: prop_updates
-            }
-        ] AS active_edges_to_update
+        RETURN CASE
+            WHEN active_e IS NOT NULL THEN [
+                {
+                    db_id: %(id_func_name)s(active_e), before_props: before_props, prop_updates: prop_updates
+                }
+            ]
+            ELSE []
+        END AS active_edges_to_update
     }
     // ------------
     // Plan deletes for all the other active edges of this type on this branch
     // ------------
     CALL (node_with_dup_edges, edge_type, edge_branch, peer) {
-        MATCH (node_with_dup_edges)-[active_e {branch: edge_branch, status: "active"}]->(peer)
+        OPTIONAL MATCH (node_with_dup_edges)-[active_e {branch: edge_branch, status: "active"}]->(peer)
         WHERE type(active_e) = edge_type
         WITH node_with_dup_edges, peer, active_e
        ORDER BY %(id_func_name)s(active_e)
         SKIP 1
-        RETURN collect(
-            {
+        WITH CASE
+            WHEN active_e IS NOT NULL THEN {
                 db_id: %(id_func_name)s(active_e),
-                from_id: %(id_func_name)s(node_with_dup_edges),
-                to_id: %(id_func_name)s(peer),
+                from_id: %(id_func_name)s(startNode(active_e)),
+                to_id: %(id_func_name)s(endNode(active_e)),
                 edge_type: type(active_e),
                 before_props: properties(active_e)
             }
-        ) AS active_edges_to_delete
+            ELSE NULL
+        END AS serialized_edge
+        RETURN collect(serialized_edge) AS active_edges_to_delete
     }
     // ------------
    // Plan one deleted edge update with correct from time
     // ------------
     CALL (node_with_dup_edges, edge_type, edge_branch, peer, deleted_from) {
-        MATCH (node_with_dup_edges)-[deleted_e {branch: edge_branch, status: "deleted"}]->(peer)
+        OPTIONAL MATCH (node_with_dup_edges)-[deleted_e {branch: edge_branch, status: "deleted"}]->(peer)
         WHERE type(deleted_e) = edge_type
         WITH node_with_dup_edges, edge_type, edge_branch, peer, deleted_from, deleted_e
         ORDER BY %(id_func_name)s(deleted_e)
         LIMIT 1
         WITH deleted_e, properties(deleted_e) AS before_props, {from: deleted_from} AS prop_updates
-        RETURN [
-            {
-                db_id: %(id_func_name)s(deleted_e), before_props: before_props, prop_updates: prop_updates
-            }
-        ] AS deleted_edges_to_update
+        RETURN CASE
+            WHEN deleted_e IS NOT NULL THEN [
+                {
+                    db_id: %(id_func_name)s(deleted_e), before_props: before_props, prop_updates: prop_updates
+                }
+            ]
+            ELSE []
+        END AS deleted_edges_to_update
     }
     // ------------
     // Plan deletes for all the other deleted edges of this type on this branch
     // ------------
     CALL (node_with_dup_edges, edge_type, edge_branch, peer) {
-        MATCH (node_with_dup_edges)-[deleted_e {branch: edge_branch, status: "deleted"}]->(peer)
+        OPTIONAL MATCH (node_with_dup_edges)-[deleted_e {branch: edge_branch, status: "deleted"}]->(peer)
         WHERE type(deleted_e) = edge_type
         WITH node_with_dup_edges, peer, deleted_e
         ORDER BY %(id_func_name)s(deleted_e)
         SKIP 1
-        RETURN collect(
-            {
+        WITH CASE
+            WHEN deleted_e IS NOT NULL THEN {
                 db_id: %(id_func_name)s(deleted_e),
-                from_id: %(id_func_name)s(node_with_dup_edges),
-                to_id: %(id_func_name)s(peer),
+                from_id: %(id_func_name)s(startNode(deleted_e)),
+                to_id: %(id_func_name)s(endNode(deleted_e)),
                 edge_type: type(deleted_e),
                 before_props: properties(deleted_e)
             }
-        ) AS deleted_edges_to_delete
+            ELSE NULL
+        END AS serialized_edge
+
+        RETURN collect(serialized_edge) AS deleted_edges_to_delete
    }
     RETURN
         active_edges_to_update + deleted_edges_to_update AS edges_to_update,
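Note: the recurring rewrite in this patch query swaps `MATCH` for `OPTIONAL MATCH` and wraps the planned rows in `CASE ... IS NOT NULL`. Inside a `CALL` subquery, a plain `MATCH` that finds nothing yields zero rows and silently drops the outer row, while the `OPTIONAL` form keeps the row and yields an empty plan; `collect()` then skips the `NULL` placeholders from the `ELSE NULL` branch. A minimal Cypher illustration of the pattern, embedded the same way the patch embeds its query (assumes Neo4j 5 `elementId()` and the scoped `CALL (vars) { ... }` syntax already used above):

```python
PATTERN_SKETCH = """
MATCH (n:Node)-->(peer)
CALL (n, peer) {
    // OPTIONAL MATCH keeps the outer row even when no edge qualifies
    OPTIONAL MATCH (n)-[e {status: "active"}]->(peer)
    RETURN CASE
        WHEN e IS NOT NULL THEN [{db_id: elementId(e)}]
        ELSE []  // an empty plan instead of a dropped row
    END AS planned_updates
}
RETURN n, peer, planned_updates
"""
```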