infrahub-server 1.4.10__py3-none-any.whl → 1.5.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Files changed (178)
  1. infrahub/actions/tasks.py +208 -16
  2. infrahub/api/artifact.py +3 -0
  3. infrahub/api/diff/diff.py +1 -1
  4. infrahub/api/query.py +2 -0
  5. infrahub/api/schema.py +3 -0
  6. infrahub/auth.py +5 -5
  7. infrahub/cli/db.py +26 -2
  8. infrahub/cli/db_commands/clean_duplicate_schema_fields.py +212 -0
  9. infrahub/config.py +7 -2
  10. infrahub/core/attribute.py +25 -22
  11. infrahub/core/branch/models.py +2 -2
  12. infrahub/core/branch/needs_rebase_status.py +11 -0
  13. infrahub/core/branch/tasks.py +4 -3
  14. infrahub/core/changelog/models.py +4 -12
  15. infrahub/core/constants/__init__.py +1 -0
  16. infrahub/core/constants/infrahubkind.py +1 -0
  17. infrahub/core/convert_object_type/object_conversion.py +201 -0
  18. infrahub/core/convert_object_type/repository_conversion.py +89 -0
  19. infrahub/core/convert_object_type/schema_mapping.py +27 -3
  20. infrahub/core/diff/model/path.py +4 -0
  21. infrahub/core/diff/payload_builder.py +1 -1
  22. infrahub/core/diff/query/artifact.py +1 -1
  23. infrahub/core/graph/__init__.py +1 -1
  24. infrahub/core/initialization.py +2 -2
  25. infrahub/core/ipam/utilization.py +1 -1
  26. infrahub/core/manager.py +9 -84
  27. infrahub/core/migrations/graph/__init__.py +6 -0
  28. infrahub/core/migrations/graph/m040_profile_attrs_in_db.py +166 -0
  29. infrahub/core/migrations/graph/m041_create_hfid_display_label_in_db.py +97 -0
  30. infrahub/core/migrations/graph/m042_backfill_hfid_display_label_in_db.py +86 -0
  31. infrahub/core/migrations/schema/node_attribute_add.py +5 -2
  32. infrahub/core/migrations/shared.py +5 -6
  33. infrahub/core/node/__init__.py +165 -42
  34. infrahub/core/node/constraints/attribute_uniqueness.py +3 -1
  35. infrahub/core/node/create.py +67 -35
  36. infrahub/core/node/lock_utils.py +98 -0
  37. infrahub/core/node/node_property_attribute.py +230 -0
  38. infrahub/core/node/standard.py +1 -1
  39. infrahub/core/property.py +11 -0
  40. infrahub/core/protocols.py +8 -1
  41. infrahub/core/query/attribute.py +27 -15
  42. infrahub/core/query/node.py +61 -185
  43. infrahub/core/query/relationship.py +43 -26
  44. infrahub/core/query/subquery.py +0 -8
  45. infrahub/core/registry.py +2 -2
  46. infrahub/core/relationship/constraints/count.py +1 -1
  47. infrahub/core/relationship/model.py +60 -20
  48. infrahub/core/schema/attribute_schema.py +0 -2
  49. infrahub/core/schema/basenode_schema.py +42 -2
  50. infrahub/core/schema/definitions/core/__init__.py +2 -0
  51. infrahub/core/schema/definitions/core/generator.py +2 -0
  52. infrahub/core/schema/definitions/core/group.py +16 -2
  53. infrahub/core/schema/definitions/core/repository.py +7 -0
  54. infrahub/core/schema/definitions/internal.py +14 -1
  55. infrahub/core/schema/generated/base_node_schema.py +6 -1
  56. infrahub/core/schema/node_schema.py +5 -2
  57. infrahub/core/schema/relationship_schema.py +0 -1
  58. infrahub/core/schema/schema_branch.py +137 -2
  59. infrahub/core/schema/schema_branch_display.py +123 -0
  60. infrahub/core/schema/schema_branch_hfid.py +114 -0
  61. infrahub/core/validators/aggregated_checker.py +1 -1
  62. infrahub/core/validators/determiner.py +12 -1
  63. infrahub/core/validators/relationship/peer.py +1 -1
  64. infrahub/core/validators/tasks.py +1 -1
  65. infrahub/display_labels/__init__.py +0 -0
  66. infrahub/display_labels/gather.py +48 -0
  67. infrahub/display_labels/models.py +240 -0
  68. infrahub/display_labels/tasks.py +186 -0
  69. infrahub/display_labels/triggers.py +22 -0
  70. infrahub/events/group_action.py +1 -1
  71. infrahub/events/node_action.py +1 -1
  72. infrahub/generators/constants.py +7 -0
  73. infrahub/generators/models.py +38 -12
  74. infrahub/generators/tasks.py +34 -16
  75. infrahub/git/base.py +38 -1
  76. infrahub/git/integrator.py +22 -14
  77. infrahub/graphql/analyzer.py +1 -1
  78. infrahub/graphql/api/dependencies.py +2 -4
  79. infrahub/graphql/api/endpoints.py +2 -2
  80. infrahub/graphql/app.py +2 -4
  81. infrahub/graphql/initialization.py +2 -3
  82. infrahub/graphql/manager.py +212 -137
  83. infrahub/graphql/middleware.py +12 -0
  84. infrahub/graphql/mutations/branch.py +11 -0
  85. infrahub/graphql/mutations/computed_attribute.py +110 -3
  86. infrahub/graphql/mutations/convert_object_type.py +34 -13
  87. infrahub/graphql/mutations/display_label.py +111 -0
  88. infrahub/graphql/mutations/generator.py +25 -7
  89. infrahub/graphql/mutations/hfid.py +118 -0
  90. infrahub/graphql/mutations/ipam.py +21 -8
  91. infrahub/graphql/mutations/main.py +37 -153
  92. infrahub/graphql/mutations/profile.py +195 -0
  93. infrahub/graphql/mutations/proposed_change.py +2 -1
  94. infrahub/graphql/mutations/relationship.py +2 -2
  95. infrahub/graphql/mutations/repository.py +22 -83
  96. infrahub/graphql/mutations/resource_manager.py +2 -2
  97. infrahub/graphql/mutations/schema.py +5 -5
  98. infrahub/graphql/mutations/webhook.py +1 -1
  99. infrahub/graphql/queries/resource_manager.py +1 -1
  100. infrahub/graphql/registry.py +173 -0
  101. infrahub/graphql/resolvers/resolver.py +2 -0
  102. infrahub/graphql/schema.py +8 -1
  103. infrahub/groups/tasks.py +1 -1
  104. infrahub/hfid/__init__.py +0 -0
  105. infrahub/hfid/gather.py +48 -0
  106. infrahub/hfid/models.py +240 -0
  107. infrahub/hfid/tasks.py +185 -0
  108. infrahub/hfid/triggers.py +22 -0
  109. infrahub/lock.py +67 -30
  110. infrahub/locks/__init__.py +0 -0
  111. infrahub/locks/tasks.py +37 -0
  112. infrahub/middleware.py +26 -1
  113. infrahub/patch/plan_writer.py +2 -2
  114. infrahub/profiles/__init__.py +0 -0
  115. infrahub/profiles/node_applier.py +101 -0
  116. infrahub/profiles/queries/__init__.py +0 -0
  117. infrahub/profiles/queries/get_profile_data.py +99 -0
  118. infrahub/profiles/tasks.py +63 -0
  119. infrahub/proposed_change/tasks.py +10 -1
  120. infrahub/repositories/__init__.py +0 -0
  121. infrahub/repositories/create_repository.py +113 -0
  122. infrahub/server.py +16 -3
  123. infrahub/services/__init__.py +8 -5
  124. infrahub/tasks/registry.py +6 -4
  125. infrahub/trigger/catalogue.py +4 -0
  126. infrahub/trigger/models.py +2 -0
  127. infrahub/trigger/tasks.py +3 -0
  128. infrahub/webhook/models.py +1 -1
  129. infrahub/workflows/catalogue.py +110 -3
  130. infrahub/workflows/initialization.py +16 -0
  131. infrahub/workflows/models.py +17 -2
  132. infrahub_sdk/branch.py +5 -8
  133. infrahub_sdk/checks.py +1 -1
  134. infrahub_sdk/client.py +364 -84
  135. infrahub_sdk/convert_object_type.py +61 -0
  136. infrahub_sdk/ctl/check.py +2 -3
  137. infrahub_sdk/ctl/cli_commands.py +18 -12
  138. infrahub_sdk/ctl/config.py +8 -2
  139. infrahub_sdk/ctl/generator.py +6 -3
  140. infrahub_sdk/ctl/graphql.py +184 -0
  141. infrahub_sdk/ctl/repository.py +39 -1
  142. infrahub_sdk/ctl/schema.py +18 -3
  143. infrahub_sdk/ctl/utils.py +4 -0
  144. infrahub_sdk/ctl/validate.py +5 -3
  145. infrahub_sdk/diff.py +4 -5
  146. infrahub_sdk/exceptions.py +2 -0
  147. infrahub_sdk/generator.py +7 -1
  148. infrahub_sdk/graphql/__init__.py +12 -0
  149. infrahub_sdk/graphql/constants.py +1 -0
  150. infrahub_sdk/graphql/plugin.py +85 -0
  151. infrahub_sdk/graphql/query.py +77 -0
  152. infrahub_sdk/{graphql.py → graphql/renderers.py} +88 -75
  153. infrahub_sdk/graphql/utils.py +40 -0
  154. infrahub_sdk/node/attribute.py +2 -0
  155. infrahub_sdk/node/node.py +28 -20
  156. infrahub_sdk/playback.py +1 -2
  157. infrahub_sdk/protocols.py +54 -6
  158. infrahub_sdk/pytest_plugin/plugin.py +7 -4
  159. infrahub_sdk/pytest_plugin/utils.py +40 -0
  160. infrahub_sdk/repository.py +1 -2
  161. infrahub_sdk/schema/__init__.py +38 -0
  162. infrahub_sdk/schema/main.py +1 -0
  163. infrahub_sdk/schema/repository.py +8 -0
  164. infrahub_sdk/spec/object.py +120 -7
  165. infrahub_sdk/spec/range_expansion.py +118 -0
  166. infrahub_sdk/timestamp.py +18 -6
  167. infrahub_sdk/transforms.py +1 -1
  168. {infrahub_server-1.4.10.dist-info → infrahub_server-1.5.0b1.dist-info}/METADATA +9 -11
  169. {infrahub_server-1.4.10.dist-info → infrahub_server-1.5.0b1.dist-info}/RECORD +177 -134
  170. infrahub_testcontainers/container.py +1 -1
  171. infrahub_testcontainers/docker-compose-cluster.test.yml +1 -1
  172. infrahub_testcontainers/docker-compose.test.yml +1 -1
  173. infrahub_testcontainers/models.py +2 -2
  174. infrahub_testcontainers/performance_test.py +4 -4
  175. infrahub/core/convert_object_type/conversion.py +0 -134
  176. {infrahub_server-1.4.10.dist-info → infrahub_server-1.5.0b1.dist-info}/LICENSE.txt +0 -0
  177. {infrahub_server-1.4.10.dist-info → infrahub_server-1.5.0b1.dist-info}/WHEEL +0 -0
  178. {infrahub_server-1.4.10.dist-info → infrahub_server-1.5.0b1.dist-info}/entry_points.txt +0 -0

infrahub/core/attribute.py
@@ -36,7 +36,7 @@ from .schema.attribute_parameters import NumberAttributeParameters
  if TYPE_CHECKING:
  from infrahub.core.branch import Branch
  from infrahub.core.node import Node
- from infrahub.core.schema import AttributeSchema
+ from infrahub.core.schema import AttributeSchema, MainSchemaTypes
  from infrahub.database import InfrahubDatabase


@@ -324,7 +324,7 @@ class BaseAttribute(FlagPropertyMixin, NodePropertyMixin):

  save_at = Timestamp(at)

- if not self.id or self.is_from_profile:
+ if not self.id:
  return None

  return await self._update(at=save_at, db=db)
@@ -395,7 +395,6 @@ class BaseAttribute(FlagPropertyMixin, NodePropertyMixin):

  Get the current value
  - If the value is the same, do nothing
- - If the value is inherited and is different, raise error (for now just ignore)
  - If the value is different, create new node and update relationship

  """
@@ -470,28 +469,32 @@ class BaseAttribute(FlagPropertyMixin, NodePropertyMixin):

  # ---------- Update the Node Properties ----------
  for prop_name in self._node_properties:
- if getattr(self, f"{prop_name}_id") and not (
- prop_name in current_attr_data.node_properties
- and current_attr_data.node_properties[prop_name].uuid == getattr(self, f"{prop_name}_id")
- ):
- previous_attribute_node_property = current_attr_data.node_properties.get(prop_name)
- previous_value = None
- if previous_attribute_node_property:
- previous_value = previous_attribute_node_property.uuid
+ current_prop_id = getattr(self, f"{prop_name}_id")
+ database_prop_id: str | None = None
+ if prop_name in current_attr_data.node_properties:
+ database_prop_id = current_attr_data.node_properties[prop_name].uuid
+ needs_update = current_prop_id is not None and current_prop_id != database_prop_id
+ needs_clear = self.is_clear(prop_name) and database_prop_id
+
+ if not needs_update and not needs_clear:
+ continue

- changelog.add_property(
- name=prop_name,
- value_current=getattr(self, f"{prop_name}_id"),
- value_previous=previous_value,
- )
+ changelog.add_property(
+ name=prop_name,
+ value_current=current_prop_id,
+ value_previous=database_prop_id,
+ )
+
+ if needs_update:
  query = await AttributeUpdateNodePropertyQuery.init(
- db=db, attr=self, at=update_at, prop_name=prop_name, prop_id=getattr(self, f"{prop_name}_id")
+ db=db, attr=self, at=update_at, prop_name=prop_name, prop_id=current_prop_id
  )
  await query.execute(db=db)

- rel = current_attr_result.get(f"rel_{prop_name}")
- if rel and rel.get("branch") == branch.name:
- await update_relationships_to([rel.element_id], to=update_at, db=db)
+ # set the to time on the previously active edge
+ rel = current_attr_result.get(f"rel_{prop_name}")
+ if rel and rel.get("branch") == branch.name:
+ await update_relationships_to([rel.element_id], to=update_at, db=db)

  if changelog.has_updates:
  return changelog
@@ -627,7 +630,7 @@ class BaseAttribute(FlagPropertyMixin, NodePropertyMixin):
  return AttributeDBNodeType.DEFAULT
  return AttributeDBNodeType.INDEXED

- def get_create_data(self) -> AttributeCreateData:
+ def get_create_data(self, node_schema: MainSchemaTypes) -> AttributeCreateData:
  branch = self.branch
  hierarchy_level = branch.hierarchy_level
  if self.schema.branch == BranchSupportType.AGNOSTIC:
@@ -642,7 +645,7 @@ class BaseAttribute(FlagPropertyMixin, NodePropertyMixin):
  branch=branch.name,
  status="active",
  branch_level=hierarchy_level,
- branch_support=self.schema.branch.value,
+ branch_support=self.schema.branch.value if self.schema.branch is not None else node_schema.branch,
  content=self.to_db(),
  is_default=self.is_default,
  is_protected=self.is_protected,

infrahub/core/branch/models.py
@@ -5,6 +5,7 @@ from typing import TYPE_CHECKING, Any, Optional, Self, Union

  from pydantic import Field, field_validator

+ from infrahub.core.branch.enums import BranchStatus
  from infrahub.core.constants import (
  GLOBAL_BRANCH_NAME,
  )
@@ -21,8 +22,6 @@ from infrahub.core.registry import registry
  from infrahub.core.timestamp import Timestamp
  from infrahub.exceptions import BranchNotFoundError, InitializationError, ValidationError

- from .enums import BranchStatus
-
  if TYPE_CHECKING:
  from infrahub.database import InfrahubDatabase

@@ -485,6 +484,7 @@ class Branch(StandardNode):
  # FIXME, we must ensure that there is no conflict before rebasing a branch
  # Otherwise we could endup with a complicated situation
  self.branched_from = at.to_string()
+ self.status = BranchStatus.OPEN
  await self.save(db=db)

  # Update the branch in the registry after the rebase

infrahub/core/branch/needs_rebase_status.py (new file)
@@ -0,0 +1,11 @@
+ from infrahub.core.branch import Branch
+ from infrahub.core.branch.enums import BranchStatus
+
+
+ def raise_needs_rebase_error(branch_name: str) -> None:
+ raise ValueError(f"Branch {branch_name} must be rebased before any updates can be made")
+
+
+ def check_need_rebase_status(branch: Branch) -> None:
+ if branch.status == BranchStatus.NEED_REBASE:
+ raise_needs_rebase_error(branch_name=branch.name)
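
A minimal sketch of how this new guard is presumably meant to be called from a write path; the surrounding function below is hypothetical and not part of this diff, only the imports and check_need_rebase_status come from the package:

    from infrahub.core.branch import Branch
    from infrahub.core.branch.needs_rebase_status import check_need_rebase_status

    async def apply_change(branch: Branch) -> None:
        # Raises ValueError when the branch status is NEED_REBASE, otherwise returns None.
        check_need_rebase_status(branch=branch)
        ...  # proceed with the update on this branch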

infrahub/core/branch/tasks.py
@@ -33,6 +33,7 @@ from infrahub.events.branch_action import BranchCreatedEvent, BranchDeletedEvent
  from infrahub.events.models import EventMeta, InfrahubEvent
  from infrahub.events.node_action import get_node_event
  from infrahub.exceptions import BranchNotFoundError, ValidationError
+ from infrahub.generators.constants import GeneratorDefinitionRunSource
  from infrahub.graphql.mutations.models import BranchCreateModel # noqa: TC001
  from infrahub.workers.dependencies import get_component, get_database, get_event_service, get_workflow
  from infrahub.workflows.catalogue import (
@@ -345,7 +346,7 @@ async def create_branch(model: BranchCreateModel, context: InfrahubContext) -> N
  async with database.start_session() as db:
  try:
  await Branch.get_by_name(db=db, name=model.name)
- raise ValueError(f"The branch {model.name}, already exist")
+ raise ValidationError(f"The branch {model.name} already exists")
  except BranchNotFoundError:
  pass

@@ -356,7 +357,7 @@ async def create_branch(model: BranchCreateModel, context: InfrahubContext) -> N
  obj = Branch(**data_dict)
  except pydantic.ValidationError as exc:
  error_msgs = [f"invalid field {error['loc'][0]}: {error['msg']}" for error in exc.errors()]
- raise ValueError("\n".join(error_msgs)) from exc
+ raise ValidationError("\n".join(error_msgs)) from exc

  async with lock.registry.local_schema_lock():
  # Copy the schema from the origin branch and set the hash and the schema_changed_at value
@@ -437,7 +438,7 @@ async def post_process_branch_merge(source_branch: str, target_branch: str, cont
  await get_workflow().submit_workflow(
  workflow=TRIGGER_GENERATOR_DEFINITION_RUN,
  context=context,
- parameters={"branch": target_branch},
+ parameters={"branch": target_branch, "source": GeneratorDefinitionRunSource.MERGE},
  )

  for diff_root in branch_diff_roots:

infrahub/core/changelog/models.py
@@ -560,7 +560,7 @@ class RelationshipChangelogGetter:

  for peer in relationship.peers:
  if peer.peer_status == DiffAction.ADDED:
- peer_schema = schema_branch.get(name=peer.peer_kind)
+ peer_schema = schema_branch.get(name=peer.peer_kind, duplicate=False)
  secondaries.extend(
  self._process_added_peers(
  peer_id=peer.peer_id,
@@ -572,7 +572,7 @@ class RelationshipChangelogGetter:
  )

  elif peer.peer_status == DiffAction.REMOVED:
- peer_schema = schema_branch.get(name=peer.peer_kind)
+ peer_schema = schema_branch.get(name=peer.peer_kind, duplicate=False)
  secondaries.extend(
  self._process_removed_peers(
  peer_id=peer.peer_id,
@@ -596,11 +596,7 @@ class RelationshipChangelogGetter:
  secondaries: list[NodeChangelog] = []
  peer_relation = peer_schema.get_relationship_by_identifier(id=str(rel_schema.identifier), raise_on_error=False)
  if peer_relation:
- node_changelog = NodeChangelog(
- node_id=peer_id,
- node_kind=peer_kind,
- display_label="n/a",
- )
+ node_changelog = NodeChangelog(node_id=peer_id, node_kind=peer_kind, display_label="n/a")
  if peer_relation.cardinality == RelationshipCardinality.ONE:
  node_changelog.relationships[peer_relation.name] = RelationshipCardinalityOneChangelog(
  name=peer_relation.name,
@@ -634,11 +630,7 @@ class RelationshipChangelogGetter:
  secondaries: list[NodeChangelog] = []
  peer_relation = peer_schema.get_relationship_by_identifier(id=str(rel_schema.identifier), raise_on_error=False)
  if peer_relation:
- node_changelog = NodeChangelog(
- node_id=peer_id,
- node_kind=peer_kind,
- display_label="n/a",
- )
+ node_changelog = NodeChangelog(node_id=peer_id, node_kind=peer_kind, display_label="n/a")
  if peer_relation.cardinality == RelationshipCardinality.ONE:
  node_changelog.relationships[peer_relation.name] = RelationshipCardinalityOneChangelog(
  name=peer_relation.name,

infrahub/core/constants/__init__.py
@@ -387,3 +387,4 @@ DEFAULT_REL_IDENTIFIER_LENGTH = 128

  OBJECT_TEMPLATE_RELATIONSHIP_NAME = "object_template"
  OBJECT_TEMPLATE_NAME_ATTR = "template_name"
+ PROFILE_NODE_RELATIONSHIP_IDENTIFIER = "node__profile"

infrahub/core/constants/infrahubkind.py
@@ -28,6 +28,7 @@ GENERATORDEFINITION = "CoreGeneratorDefinition"
  GENERATORINSTANCE = "CoreGeneratorInstance"
  GENERATORVALIDATOR = "CoreGeneratorValidator"
  GENERATORGROUP = "CoreGeneratorGroup"
+ GENERATORAWAREGROUP = "CoreGeneratorAwareGroup"
  GENERICGROUP = "CoreGroup"
  GLOBALPERMISSION = "CoreGlobalPermission"
  GRAPHQLQUERY = "CoreGraphQLQuery"

infrahub/core/convert_object_type/object_conversion.py (new file)
@@ -0,0 +1,201 @@
+ from typing import Any, assert_never
+
+ from infrahub_sdk.convert_object_type import ConversionFieldInput, ConversionFieldValue
+
+ from infrahub.core.attribute import BaseAttribute
+ from infrahub.core.branch import Branch
+ from infrahub.core.branch.enums import BranchStatus
+ from infrahub.core.constants import BranchSupportType, RelationshipCardinality
+ from infrahub.core.manager import NodeManager
+ from infrahub.core.node import Node
+ from infrahub.core.node.create import create_node
+ from infrahub.core.query.relationship import GetAllPeersIds
+ from infrahub.core.query.resource_manager import PoolChangeReserved
+ from infrahub.core.relationship import RelationshipManager
+ from infrahub.core.schema import NodeSchema
+ from infrahub.core.timestamp import Timestamp
+ from infrahub.database import InfrahubDatabase
+ from infrahub.message_bus.messages import RefreshRegistryBranches
+ from infrahub.tasks.registry import update_branch_registry
+ from infrahub.workers.dependencies import get_message_bus
+
+
+ def _get_conversion_field_raw_value(conv_field_value: ConversionFieldValue) -> Any:
+ if conv_field_value.attribute_value is not None:
+ return conv_field_value.attribute_value
+ if conv_field_value.peer_id is not None:
+ return conv_field_value.peer_id
+ if conv_field_value.peers_ids is not None:
+ return conv_field_value.peers_ids
+ raise ValueError("ConversionFieldValue has not been validated correctly.")
+
+
+ async def get_out_rels_peers_ids(node: Node, db: InfrahubDatabase, at: Timestamp) -> list[str]:
+ all_peers_ids: list[str] = []
+ for name in node._relationships:
+ relm: RelationshipManager = getattr(node, name)
+ peers = await relm.get_db_peers(db=db, at=at)
+ all_peers_ids.extend([str(peer.peer_id) for peer in peers])
+ return all_peers_ids
+
+
+ async def build_data_new_node(db: InfrahubDatabase, mapping: dict[str, ConversionFieldInput], node: Node) -> dict:
+ """Value of a given field on the target kind to convert is either an input source attribute/relationship of the source node,
+ or a raw value."""
+
+ data = {}
+ for dest_field_name, conv_field_input in mapping.items():
+ if conv_field_input.source_field is not None:
+ # Fetch the value of the corresponding field from the node being converted.
+ item = getattr(node, conv_field_input.source_field)
+ if isinstance(item, BaseAttribute):
+ data[dest_field_name] = item.value
+ elif isinstance(item, RelationshipManager):
+ if item.schema.cardinality == RelationshipCardinality.ONE:
+ peer = await item.get_peer(db=db)
+ if peer is not None:
+ data[dest_field_name] = {"id": peer.id}
+ # else, relationship is optional, and if the target relationship is mandatory an error will be raised during creation
+ elif item.schema.cardinality == RelationshipCardinality.MANY:
+ data[dest_field_name] = [{"id": peer.id} for _, peer in (await item.get_peers(db=db)).items()]
+ else:
+ assert_never(item.schema.cardinality)
+ elif conv_field_input.data is not None:
+ data[dest_field_name] = _get_conversion_field_raw_value(conv_field_input.data)
+ elif conv_field_input.use_default_value is True:
+ pass # default value will be used automatically when creating the node
+ else:
+ raise ValueError("ConversionFieldInput has not been validated correctly.")
+ return data
+
+
+ async def get_unidirectional_rels_peers_ids(
+ node: Node, branch: Branch, db: InfrahubDatabase, at: Timestamp
+ ) -> list[str]:
+ """
+ Returns peers ids of nodes connected to input `node` through an incoming unidirectional relationship.
+ """
+
+ out_rels_identifier = [rel.identifier for rel in node.get_schema().relationships]
+ branch_agnostic = node.get_schema().branch == BranchSupportType.AGNOSTIC
+ query = await GetAllPeersIds.init(
+ db=db,
+ node_id=node.id,
+ branch=branch,
+ exclude_identifiers=out_rels_identifier,
+ branch_agnostic=branch_agnostic,
+ at=at,
+ )
+ await query.execute(db=db)
+ return query.get_peers_uuids()
+
+
+ async def _get_other_active_branches(db: InfrahubDatabase) -> list[Branch]:
+ branches = await Branch.get_list(db=db)
+ return [branch for branch in branches if not (branch.is_global or branch.is_default)]
+
+
+ def _has_pass_thru_aware_attributes(node_schema: NodeSchema, mapping: dict[str, ConversionFieldInput]) -> bool:
+ aware_attributes = [attr for attr in node_schema.attributes if attr.branch != BranchSupportType.AGNOSTIC]
+ aware_attributes_pass_thru = [
+ attr.name for attr in aware_attributes if attr.name in mapping and mapping[attr.name].source_field is not None
+ ]
+ return len(aware_attributes_pass_thru) > 0
+
+
+ async def validate_conversion(
+ deleted_node: Node, branch: Branch, db: InfrahubDatabase, timestamp_before_conversion: Timestamp
+ ) -> None:
+ deleted_node_out_rels_peer_ids = await get_out_rels_peers_ids(
+ node=deleted_node, db=db, at=timestamp_before_conversion
+ )
+ deleted_node_unidir_rels_peer_ids = await get_unidirectional_rels_peers_ids(
+ node=deleted_node, db=db, branch=branch, at=timestamp_before_conversion
+ )
+
+ # Make sure relationships with constraints are not broken by retrieving them
+ peers_ids = deleted_node_out_rels_peer_ids + deleted_node_unidir_rels_peer_ids
+ peers = await NodeManager.get_many(ids=peers_ids, db=db, prefetch_relationships=True, branch=branch)
+ for peer in peers.values():
+ peer.validate_relationships()
+
+
+ async def convert_and_validate_object_type(
+ node: Node,
+ target_schema: NodeSchema,
+ mapping: dict[str, ConversionFieldInput],
+ branch: Branch,
+ db: InfrahubDatabase,
+ ) -> Node:
+ async with db.start_transaction() as dbt:
+ timestamp_before_conversion = Timestamp()
+ new_node = await convert_object_type(
+ node=node, target_schema=target_schema, mapping=mapping, branch=branch, db=dbt
+ )
+ await validate_conversion(
+ deleted_node=node, branch=branch, db=dbt, timestamp_before_conversion=timestamp_before_conversion
+ )
+
+ # Refresh outside the transaction otherwise other workers would pull outdated branch objects.
+ message_bus = await get_message_bus()
+ await message_bus.send(RefreshRegistryBranches())
+
+ return new_node
+
+
+ async def convert_object_type(
+ node: Node,
+ target_schema: NodeSchema,
+ mapping: dict[str, ConversionFieldInput],
+ branch: Branch,
+ db: InfrahubDatabase,
+ ) -> Node:
+ """Delete the node and return the new created one. If creation fails, the node is not deleted, and raise an error.
+ An extra check is performed on input node peers relationships to make sure they are still valid."""
+
+ node_schema = node.get_schema()
+ if not isinstance(node_schema, NodeSchema):
+ raise ValueError(f"Only a node with a NodeSchema can be converted, got {type(node_schema)}")
+
+ # Delete the node, so we delete relationships with peers as well, which might temporarily break cardinality constraints
+ # but they should be restored when creating the new node.
+ deleted_nodes = await NodeManager.delete(db=db, branch=branch, nodes=[node], cascade_delete=False)
+ if len(deleted_nodes) != 1:
+ raise ValueError(f"Deleted {len(deleted_nodes)} nodes instead of 1")
+
+ data_new_node = await build_data_new_node(db, mapping, node)
+
+ if node_schema.branch == BranchSupportType.AGNOSTIC and _has_pass_thru_aware_attributes(
+ node_schema=node_schema, mapping=mapping
+ ):
+ if not branch.is_default:
+ raise ValueError(
+ f"Conversion of {node_schema.kind} is not allowed on branch {branch.name} because it is agnostic and has aware attributes"
+ )
+
+ # When converting an agnostic node with aware attributes, we need to put other branches in NEED_REBASE state
+ # as aware attributes do not exist in other branches after conversion
+ other_branches = await _get_other_active_branches(db=db)
+ for br in other_branches:
+ br.status = BranchStatus.NEED_REBASE
+ await br.save(db=db)
+ # Registry of other API workers are updated outside the transaction
+ await update_branch_registry(db=db, branch=br)
+
+ node_created = await create_node(
+ data=data_new_node,
+ db=db,
+ branch=branch,
+ schema=target_schema,
+ )
+
+ # If the node had some value reserved in any Pools / Resource Manager, we need to change the identifier of the reservation(s)
+ query = await PoolChangeReserved.init(
+ db=db,
+ existing_identifier=node.get_id(),
+ new_identifier=node_created.get_id(),
+ branch=branch,
+ )
+ await query.execute(db=db)
+
+ return node_created
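
From the signatures above, convert_and_validate_object_type appears to be the intended entry point: it wraps convert_object_type in a transaction, re-validates the peers of the deleted node, and then refreshes the branch registries. A hedged sketch of a call site follows; every object it passes in (existing_node, target_schema, mapping, branch, db) is assumed to have been built by the caller and is not defined in this diff:

    # Assumed call site; only convert_and_validate_object_type and its parameters come from the diff.
    new_node = await convert_and_validate_object_type(
        node=existing_node,           # Node instance to convert
        target_schema=target_schema,  # NodeSchema of the destination kind
        mapping=mapping,              # dict[str, ConversionFieldInput] describing field sources
        branch=branch,
        db=db,
    )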

infrahub/core/convert_object_type/repository_conversion.py (new file)
@@ -0,0 +1,89 @@
+ from infrahub import lock
+ from infrahub.core.branch import Branch
+ from infrahub.core.constants.infrahubkind import REPOSITORYVALIDATOR, USERVALIDATOR
+ from infrahub.core.convert_object_type.object_conversion import (
+ ConversionFieldInput,
+ convert_object_type,
+ validate_conversion,
+ )
+ from infrahub.core.manager import NodeManager
+ from infrahub.core.node import Node
+ from infrahub.core.protocols import CoreReadOnlyRepository, CoreRepository
+ from infrahub.core.schema import NodeSchema
+ from infrahub.core.timestamp import Timestamp
+ from infrahub.database import InfrahubDatabase
+ from infrahub.message_bus.messages import RefreshRegistryBranches
+ from infrahub.repositories.create_repository import RepositoryFinalizer
+ from infrahub.workers.dependencies import get_message_bus
+
+
+ async def convert_repository_type(
+ repository: CoreRepository | CoreReadOnlyRepository,
+ target_schema: NodeSchema,
+ mapping: dict[str, ConversionFieldInput],
+ branch: Branch,
+ db: InfrahubDatabase,
+ repository_post_creator: RepositoryFinalizer,
+ ) -> Node:
+ """Delete the node and return the new created one. If creation fails, the node is not deleted, and raise an error.
+ An extra check is performed on input node peers relationships to make sure they are still valid."""
+
+ repo_name = repository.name.value
+ async with lock.registry.get(name=repo_name, namespace="repository"):
+ async with db.start_transaction() as dbt:
+ timestamp_before_conversion = Timestamp()
+
+ # Fetch validators before deleting the repository otherwise validator-repository would no longer exist
+ user_validators = await NodeManager.query(
+ db=dbt, schema=USERVALIDATOR, prefetch_relationships=True, filters={"repository__id": repository.id}
+ )
+ repository_validators = await NodeManager.query(
+ db=dbt,
+ schema=REPOSITORYVALIDATOR,
+ prefetch_relationships=True,
+ filters={"repository__id": repository.id},
+ )
+ new_repository = await convert_object_type(
+ node=repository, # type: ignore[arg-type]
+ target_schema=target_schema,
+ mapping=mapping,
+ branch=branch,
+ db=dbt,
+ )
+
+ for user_validator in user_validators:
+ await user_validator.repository.update(db=dbt, data=new_repository)
+ await user_validator.repository.save(db=dbt)
+
+ for repository_validator in repository_validators:
+ await repository_validator.repository.update(db=dbt, data=new_repository)
+ await repository_validator.repository.save(db=dbt)
+
+ await validate_conversion(
+ deleted_node=repository, # type: ignore[arg-type]
+ branch=branch,
+ db=dbt,
+ timestamp_before_conversion=timestamp_before_conversion,
+ )
+
+ # Refresh outside the transaction otherwise other workers would pull outdated branch objects.
+ message_bus = await get_message_bus()
+ await message_bus.send(RefreshRegistryBranches())
+
+ # Following call involve a potential update of `commit` value of the newly created repository
+ # that would be done from another database connection so it can't be performed within above transaction.
+ # Also note since the conversion can only be performed on main branch here, it is fine that we do it
+ # after having updating other branches status to NEEDS_REBASE.
+ await repository_post_creator.post_create(
+ branch=branch,
+ obj=new_repository, # type: ignore
+ db=db,
+ delete_on_connectivity_failure=False,
+ )
+
+ # Delete the RepositoryGroup associated with the old repository, as a new one was created for the new repository.
+ repository_groups = (await repository.groups_objects.get_peers(db=db)).values()
+ for repository_group in repository_groups:
+ await NodeManager.delete(db=db, branch=branch, nodes=[repository_group], cascade_delete=False)
+
+ return new_repository

infrahub/core/convert_object_type/schema_mapping.py
@@ -1,6 +1,6 @@
  from pydantic import BaseModel

- from infrahub.core.constants import RelationshipCardinality
+ from infrahub.core.constants import BranchSupportType, RelationshipCardinality
  from infrahub.core.schema import NodeSchema


@@ -13,6 +13,21 @@ class SchemaMappingValue(BaseModel):
  SchemaMapping = dict[str, SchemaMappingValue]


+ def _are_branch_support_matching(
+ left_branch_support: BranchSupportType | None,
+ right_branch_support: BranchSupportType | None,
+ ) -> bool:
+ if left_branch_support == right_branch_support:
+ return True
+
+ local_aware = [BranchSupportType.AWARE, BranchSupportType.LOCAL]
+
+ if left_branch_support in local_aware and right_branch_support in local_aware:
+ return True
+
+ return False
+
+
  def get_schema_mapping(source_schema: NodeSchema, target_schema: NodeSchema) -> SchemaMapping:
  """
  Return fields mapping meant to be used for converting a node from `source_kind` to `target_kind`.
@@ -31,7 +46,11 @@ def get_schema_mapping(source_schema: NodeSchema, target_schema: NodeSchema) ->
  # Process attributes
  for target_attr in target_schema.attributes:
  source_attr = source_attrs.get(target_attr.name)
- if source_attr and source_attr.kind == target_attr.kind:
+ if (
+ source_attr
+ and source_attr.kind == target_attr.kind
+ and _are_branch_support_matching(source_attr.branch, target_attr.branch)
+ ):
  target_field_to_source_field[target_attr.name] = SchemaMappingValue(
  source_field_name=source_attr.name, is_mandatory=not target_attr.optional
  )
@@ -41,7 +60,12 @@ def get_schema_mapping(source_schema: NodeSchema, target_schema: NodeSchema) ->
  # Process relationships
  for target_rel in target_schema.relationships:
  source_rel = source_rels.get(target_rel.name)
- if source_rel and source_rel.peer == target_rel.peer and source_rel.cardinality == target_rel.cardinality:
+ if (
+ source_rel
+ and source_rel.peer == target_rel.peer
+ and source_rel.cardinality == target_rel.cardinality
+ and _are_branch_support_matching(source_rel.branch, target_rel.branch)
+ ):
  target_field_to_source_field[target_rel.name] = SchemaMappingValue(
  source_field_name=source_rel.name,
  is_mandatory=not target_rel.optional,
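
For reference, the behaviour implied by the new _are_branch_support_matching helper above, shown as illustrative calls; the function is module-private and these lines are not part of the package:

    from infrahub.core.constants import BranchSupportType
    from infrahub.core.convert_object_type.schema_mapping import _are_branch_support_matching

    _are_branch_support_matching(BranchSupportType.AGNOSTIC, BranchSupportType.AGNOSTIC)  # True: identical support always matches
    _are_branch_support_matching(BranchSupportType.AWARE, BranchSupportType.LOCAL)        # True: AWARE and LOCAL are treated as interchangeable
    _are_branch_support_matching(BranchSupportType.AGNOSTIC, BranchSupportType.AWARE)     # False: any other combination is a mismatch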

infrahub/core/diff/model/path.py
@@ -335,6 +335,10 @@ class EnrichedDiffNode(BaseSummary):
  def kind(self) -> str:
  return self.identifier.kind

+ @property
+ def is_schema_node(self) -> bool:
+ return self.identifier.kind.startswith("Schema")
+
  @property
  def num_properties(self) -> int:
  return sum(a.num_properties for a in self.attributes) + sum(r.num_properties for r in self.relationships)

infrahub/core/diff/payload_builder.py
@@ -36,7 +36,7 @@ async def get_display_labels_per_kind(
  break
  node_map = await NodeManager.get_many(ids=limited_ids, fields=fields, db=db, branch=branch)
  for node_id, node in node_map.items():
- display_label_map[node_id] = await node.render_display_label(db=db)
+ display_label_map[node_id] = await node.get_display_label(db=db)
  offset += limit
  return display_label_map


infrahub/core/diff/query/artifact.py
@@ -148,7 +148,7 @@ CALL (target_node, definition_node){
  )
  RETURN
  target_artifact,
- (trel1.status = "active" AND trel2.status = "active" AND drel1.status = "active" AND drel1.status = "active") AS artifact_is_active
+ (trel1.status = "active" AND trel2.status = "active" AND drel1.status = "active" AND drel2.status = "active") AS artifact_is_active
  ORDER BY trel1.from DESC, trel2.from DESC, drel1.from DESC, drel2.from DESC,
  trel1.status ASC, trel2.status ASC, drel1.status ASC, drel2.status ASC
  LIMIT 1

infrahub/core/graph/__init__.py
@@ -1 +1 @@
- GRAPH_VERSION = 39
+ GRAPH_VERSION = 42

infrahub/core/initialization.py
@@ -34,7 +34,7 @@ from infrahub.database import InfrahubDatabase
  from infrahub.database.memgraph import IndexManagerMemgraph
  from infrahub.database.neo4j import IndexManagerNeo4j
  from infrahub.exceptions import DatabaseError
- from infrahub.graphql.manager import GraphQLSchemaManager
+ from infrahub.graphql.manager import registry as graphql_registry
  from infrahub.log import get_logger
  from infrahub.menu.utils import create_default_menu
  from infrahub.permissions import PermissionBackend, get_or_create_global_permission
@@ -196,7 +196,7 @@ async def initialization(db: InfrahubDatabase, add_database_indexes: bool = Fals

  default_branch = registry.get_branch_from_registry(branch=registry.default_branch)
  schema_branch = registry.schema.get_schema_branch(name=default_branch.name)
- gqlm = GraphQLSchemaManager.get_manager_for_branch(branch=default_branch, schema_branch=schema_branch)
+ gqlm = graphql_registry.get_manager_for_branch(branch=default_branch, schema_branch=schema_branch)
  gqlm.get_graphql_schema(
  include_query=True,
  include_mutation=True,

infrahub/core/ipam/utilization.py
@@ -152,4 +152,4 @@ class PrefixUtilizationGetter:
  grand_total_space += prefix_total_space
  if grand_total_space == 0:
  return 0.0
- return (grand_total_used / grand_total_space) * 100
+ return min((grand_total_used / grand_total_space) * 100, 100)
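
The utilization change above clamps the percentage so an over-allocated prefix can no longer report more than 100%. A worked example with made-up numbers; only the clamping expression comes from the diff:

    # Hypothetical values for illustration.
    grand_total_used, grand_total_space = 120, 100
    utilization = min((grand_total_used / grand_total_space) * 100, 100)  # 120% is reported as 100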