infrahub-server 1.5.0b1__py3-none-any.whl → 1.5.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (171) hide show
  1. infrahub/api/dependencies.py +4 -13
  2. infrahub/api/internal.py +2 -0
  3. infrahub/api/oauth2.py +13 -19
  4. infrahub/api/oidc.py +15 -21
  5. infrahub/api/schema.py +24 -3
  6. infrahub/api/transformation.py +22 -20
  7. infrahub/artifacts/models.py +2 -1
  8. infrahub/auth.py +137 -3
  9. infrahub/cli/__init__.py +2 -0
  10. infrahub/cli/db.py +158 -155
  11. infrahub/cli/dev.py +118 -0
  12. infrahub/cli/tasks.py +46 -0
  13. infrahub/cli/upgrade.py +56 -9
  14. infrahub/computed_attribute/tasks.py +20 -8
  15. infrahub/core/attribute.py +10 -2
  16. infrahub/core/branch/enums.py +1 -1
  17. infrahub/core/branch/models.py +7 -3
  18. infrahub/core/branch/tasks.py +68 -7
  19. infrahub/core/constants/__init__.py +3 -0
  20. infrahub/core/diff/calculator.py +2 -2
  21. infrahub/core/diff/query/artifact.py +1 -0
  22. infrahub/core/diff/query/delete_query.py +9 -5
  23. infrahub/core/diff/query/field_summary.py +1 -0
  24. infrahub/core/diff/query/merge.py +39 -23
  25. infrahub/core/graph/__init__.py +1 -1
  26. infrahub/core/initialization.py +5 -2
  27. infrahub/core/migrations/__init__.py +3 -0
  28. infrahub/core/migrations/exceptions.py +4 -0
  29. infrahub/core/migrations/graph/__init__.py +12 -13
  30. infrahub/core/migrations/graph/load_schema_branch.py +21 -0
  31. infrahub/core/migrations/graph/m013_convert_git_password_credential.py +1 -1
  32. infrahub/core/migrations/graph/m037_index_attr_vals.py +11 -30
  33. infrahub/core/migrations/graph/m039_ipam_reconcile.py +9 -7
  34. infrahub/core/migrations/graph/m040_duplicated_attributes.py +81 -0
  35. infrahub/core/migrations/graph/m041_deleted_dup_edges.py +149 -0
  36. infrahub/core/migrations/graph/m042_profile_attrs_in_db.py +147 -0
  37. infrahub/core/migrations/graph/m043_create_hfid_display_label_in_db.py +164 -0
  38. infrahub/core/migrations/graph/m044_backfill_hfid_display_label_in_db.py +864 -0
  39. infrahub/core/migrations/query/__init__.py +7 -8
  40. infrahub/core/migrations/query/attribute_add.py +8 -6
  41. infrahub/core/migrations/query/attribute_remove.py +134 -0
  42. infrahub/core/migrations/runner.py +54 -0
  43. infrahub/core/migrations/schema/attribute_kind_update.py +9 -3
  44. infrahub/core/migrations/schema/attribute_supports_profile.py +90 -0
  45. infrahub/core/migrations/schema/node_attribute_add.py +30 -2
  46. infrahub/core/migrations/schema/node_attribute_remove.py +13 -109
  47. infrahub/core/migrations/schema/node_kind_update.py +2 -1
  48. infrahub/core/migrations/schema/node_remove.py +2 -1
  49. infrahub/core/migrations/schema/placeholder_dummy.py +3 -2
  50. infrahub/core/migrations/shared.py +62 -14
  51. infrahub/core/models.py +2 -2
  52. infrahub/core/node/__init__.py +42 -12
  53. infrahub/core/node/create.py +46 -63
  54. infrahub/core/node/lock_utils.py +70 -44
  55. infrahub/core/node/resource_manager/ip_address_pool.py +2 -1
  56. infrahub/core/node/resource_manager/ip_prefix_pool.py +2 -1
  57. infrahub/core/node/resource_manager/number_pool.py +2 -1
  58. infrahub/core/query/attribute.py +55 -0
  59. infrahub/core/query/diff.py +61 -16
  60. infrahub/core/query/ipam.py +16 -4
  61. infrahub/core/query/node.py +51 -43
  62. infrahub/core/query/relationship.py +1 -0
  63. infrahub/core/relationship/model.py +10 -5
  64. infrahub/core/schema/__init__.py +56 -0
  65. infrahub/core/schema/attribute_schema.py +4 -0
  66. infrahub/core/schema/definitions/core/check.py +1 -1
  67. infrahub/core/schema/definitions/core/transform.py +1 -1
  68. infrahub/core/schema/definitions/internal.py +2 -2
  69. infrahub/core/schema/generated/attribute_schema.py +2 -2
  70. infrahub/core/schema/manager.py +22 -1
  71. infrahub/core/schema/schema_branch.py +180 -22
  72. infrahub/core/schema/schema_branch_display.py +12 -0
  73. infrahub/core/schema/schema_branch_hfid.py +6 -0
  74. infrahub/core/validators/uniqueness/checker.py +2 -1
  75. infrahub/database/__init__.py +0 -13
  76. infrahub/database/graph.py +21 -0
  77. infrahub/display_labels/tasks.py +13 -7
  78. infrahub/events/branch_action.py +27 -1
  79. infrahub/generators/tasks.py +3 -7
  80. infrahub/git/base.py +4 -1
  81. infrahub/git/integrator.py +1 -1
  82. infrahub/git/models.py +2 -1
  83. infrahub/git/repository.py +22 -5
  84. infrahub/git/tasks.py +66 -10
  85. infrahub/git/utils.py +123 -1
  86. infrahub/graphql/analyzer.py +9 -0
  87. infrahub/graphql/api/endpoints.py +14 -4
  88. infrahub/graphql/manager.py +4 -9
  89. infrahub/graphql/mutations/branch.py +5 -0
  90. infrahub/graphql/mutations/convert_object_type.py +11 -1
  91. infrahub/graphql/mutations/display_label.py +17 -10
  92. infrahub/graphql/mutations/hfid.py +17 -10
  93. infrahub/graphql/mutations/ipam.py +54 -35
  94. infrahub/graphql/mutations/main.py +27 -28
  95. infrahub/graphql/mutations/proposed_change.py +6 -0
  96. infrahub/graphql/schema_sort.py +170 -0
  97. infrahub/graphql/types/branch.py +4 -1
  98. infrahub/graphql/types/enums.py +3 -0
  99. infrahub/hfid/tasks.py +13 -7
  100. infrahub/lock.py +52 -12
  101. infrahub/message_bus/types.py +3 -1
  102. infrahub/permissions/constants.py +2 -0
  103. infrahub/profiles/queries/get_profile_data.py +4 -5
  104. infrahub/proposed_change/tasks.py +66 -23
  105. infrahub/server.py +6 -2
  106. infrahub/services/__init__.py +2 -2
  107. infrahub/services/adapters/http/__init__.py +5 -0
  108. infrahub/services/adapters/workflow/worker.py +14 -3
  109. infrahub/task_manager/event.py +5 -0
  110. infrahub/task_manager/models.py +7 -0
  111. infrahub/task_manager/task.py +73 -0
  112. infrahub/trigger/setup.py +13 -4
  113. infrahub/trigger/tasks.py +3 -0
  114. infrahub/workers/dependencies.py +10 -1
  115. infrahub/workers/infrahub_async.py +10 -2
  116. infrahub/workflows/catalogue.py +8 -0
  117. infrahub/workflows/initialization.py +5 -0
  118. infrahub/workflows/utils.py +2 -1
  119. infrahub_sdk/analyzer.py +1 -1
  120. infrahub_sdk/batch.py +2 -2
  121. infrahub_sdk/branch.py +14 -2
  122. infrahub_sdk/checks.py +1 -1
  123. infrahub_sdk/client.py +15 -14
  124. infrahub_sdk/config.py +29 -2
  125. infrahub_sdk/ctl/branch.py +3 -0
  126. infrahub_sdk/ctl/cli_commands.py +2 -0
  127. infrahub_sdk/ctl/exceptions.py +1 -1
  128. infrahub_sdk/ctl/schema.py +22 -7
  129. infrahub_sdk/ctl/task.py +110 -0
  130. infrahub_sdk/exceptions.py +18 -18
  131. infrahub_sdk/graphql/query.py +2 -2
  132. infrahub_sdk/node/attribute.py +1 -1
  133. infrahub_sdk/node/property.py +1 -1
  134. infrahub_sdk/node/related_node.py +3 -3
  135. infrahub_sdk/node/relationship.py +4 -6
  136. infrahub_sdk/object_store.py +2 -2
  137. infrahub_sdk/operation.py +1 -1
  138. infrahub_sdk/protocols_generator/generator.py +1 -1
  139. infrahub_sdk/pytest_plugin/exceptions.py +9 -9
  140. infrahub_sdk/pytest_plugin/items/base.py +1 -1
  141. infrahub_sdk/pytest_plugin/items/check.py +1 -1
  142. infrahub_sdk/pytest_plugin/items/python_transform.py +1 -1
  143. infrahub_sdk/repository.py +1 -1
  144. infrahub_sdk/schema/__init__.py +33 -5
  145. infrahub_sdk/spec/models.py +7 -0
  146. infrahub_sdk/spec/object.py +41 -102
  147. infrahub_sdk/spec/processors/__init__.py +0 -0
  148. infrahub_sdk/spec/processors/data_processor.py +10 -0
  149. infrahub_sdk/spec/processors/factory.py +34 -0
  150. infrahub_sdk/spec/processors/range_expand_processor.py +56 -0
  151. infrahub_sdk/task/exceptions.py +4 -4
  152. infrahub_sdk/task/manager.py +2 -2
  153. infrahub_sdk/task/models.py +6 -4
  154. infrahub_sdk/timestamp.py +1 -1
  155. infrahub_sdk/transfer/exporter/json.py +1 -1
  156. infrahub_sdk/transfer/importer/json.py +1 -1
  157. infrahub_sdk/transforms.py +1 -1
  158. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/METADATA +4 -2
  159. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/RECORD +168 -152
  160. infrahub_testcontainers/container.py +144 -6
  161. infrahub_testcontainers/docker-compose-cluster.test.yml +5 -0
  162. infrahub_testcontainers/docker-compose.test.yml +5 -0
  163. infrahub_testcontainers/helpers.py +19 -4
  164. infrahub_testcontainers/models.py +8 -6
  165. infrahub_testcontainers/performance_test.py +6 -4
  166. infrahub/core/migrations/graph/m040_profile_attrs_in_db.py +0 -166
  167. infrahub/core/migrations/graph/m041_create_hfid_display_label_in_db.py +0 -97
  168. infrahub/core/migrations/graph/m042_backfill_hfid_display_label_in_db.py +0 -86
  169. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/LICENSE.txt +0 -0
  170. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/WHEEL +0 -0
  171. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/entry_points.txt +0 -0
@@ -1,23 +1,31 @@
1
1
  from __future__ import annotations
2
2
 
3
- from typing import TYPE_CHECKING, Any, Sequence
3
+ from typing import TYPE_CHECKING, Any, Sequence, TypeAlias
4
4
 
5
5
  from pydantic import BaseModel, ConfigDict, Field
6
+ from rich.console import Console
6
7
  from typing_extensions import Self
7
8
 
8
9
  from infrahub.core import registry
9
10
  from infrahub.core.path import SchemaPath # noqa: TC001
10
11
  from infrahub.core.query import Query # noqa: TC001
11
- from infrahub.core.schema import (
12
- AttributeSchema,
13
- MainSchemaTypes,
14
- RelationshipSchema,
15
- SchemaRoot,
16
- internal_schema,
17
- )
12
+ from infrahub.core.schema import AttributeSchema, MainSchemaTypes, RelationshipSchema, SchemaRoot, internal_schema
18
13
  from infrahub.core.timestamp import Timestamp
19
14
 
20
- from .query import MigrationQuery # noqa: TC001
15
+ from .query import MigrationBaseQuery # noqa: TC001
16
+
17
+ MIGRATION_LOG_TIME_FORMAT = "[%Y-%m-%d %H:%M:%S]"
18
+ _migration_console: Console | None = None
19
+
20
+
21
+ def get_migration_console() -> Console:
22
+ global _migration_console
23
+
24
+ if _migration_console is None:
25
+ _migration_console = Console(log_time_format=MIGRATION_LOG_TIME_FORMAT)
26
+
27
+ return _migration_console
28
+
21
29
 
22
30
  if TYPE_CHECKING:
23
31
  from infrahub.core.branch import Branch
@@ -40,7 +48,9 @@ class MigrationResult(BaseModel):
40
48
  class SchemaMigration(BaseModel):
41
49
  model_config = ConfigDict(arbitrary_types_allowed=True)
42
50
  name: str = Field(..., description="Name of the migration")
43
- queries: Sequence[type[MigrationQuery]] = Field(..., description="List of queries to execute for this migration")
51
+ queries: Sequence[type[MigrationBaseQuery]] = Field(
52
+ ..., description="List of queries to execute for this migration"
53
+ )
44
54
 
45
55
  new_node_schema: MainSchemaTypes | None = None
46
56
  previous_node_schema: MainSchemaTypes | None = None
@@ -65,9 +75,14 @@ class SchemaMigration(BaseModel):
65
75
  return result
66
76
 
67
77
  async def execute_queries(
68
- self, db: InfrahubDatabase, result: MigrationResult, branch: Branch, at: Timestamp
78
+ self,
79
+ db: InfrahubDatabase,
80
+ result: MigrationResult,
81
+ branch: Branch,
82
+ at: Timestamp,
83
+ queries: Sequence[type[MigrationBaseQuery]],
69
84
  ) -> MigrationResult:
70
- for migration_query in self.queries:
85
+ for migration_query in queries:
71
86
  try:
72
87
  query = await migration_query.init(db=db, branch=branch, at=at, migration=self)
73
88
  await query.execute(db=db)
@@ -78,13 +93,20 @@ class SchemaMigration(BaseModel):
78
93
 
79
94
  return result
80
95
 
81
- async def execute(self, db: InfrahubDatabase, branch: Branch, at: Timestamp | str | None = None) -> MigrationResult:
96
+ async def execute(
97
+ self,
98
+ db: InfrahubDatabase,
99
+ branch: Branch,
100
+ at: Timestamp | str | None = None,
101
+ queries: Sequence[type[MigrationBaseQuery]] | None = None,
102
+ ) -> MigrationResult:
82
103
  async with db.start_transaction() as ts:
83
104
  result = MigrationResult()
84
105
  at = Timestamp(at)
85
106
 
86
107
  await self.execute_pre_queries(db=ts, result=result, branch=branch, at=at)
87
- await self.execute_queries(db=ts, result=result, branch=branch, at=at)
108
+ queries_to_execute = queries or self.queries
109
+ await self.execute_queries(db=ts, result=result, branch=branch, at=at, queries=queries_to_execute)
88
110
  await self.execute_post_queries(db=ts, result=result, branch=branch, at=at)
89
111
 
90
112
  return result
@@ -103,6 +125,8 @@ class SchemaMigration(BaseModel):
103
125
 
104
126
 
105
127
  class AttributeSchemaMigration(SchemaMigration):
128
+ uuids: list[str] | None = None
129
+
106
130
  @property
107
131
  def new_attribute_schema(self) -> AttributeSchema:
108
132
  if not self.schema_path.field_name:
@@ -214,3 +238,27 @@ class ArbitraryMigration(BaseModel):
214
238
 
215
239
  async def execute(self, db: InfrahubDatabase) -> MigrationResult:
216
240
  raise NotImplementedError()
241
+
242
+
243
+ class MigrationRequiringRebase(BaseModel):
244
+ model_config = ConfigDict(arbitrary_types_allowed=True)
245
+ name: str = Field(..., description="Name of the migration")
246
+ minimum_version: int = Field(..., description="Minimum version of the graph to execute this migration")
247
+
248
+ @classmethod
249
+ def init(cls, **kwargs: dict[str, Any]) -> Self:
250
+ return cls(**kwargs) # type: ignore[arg-type]
251
+
252
+ async def validate_migration(self, db: InfrahubDatabase) -> MigrationResult:
253
+ raise NotImplementedError()
254
+
255
+ async def execute_against_branch(self, db: InfrahubDatabase, branch: Branch) -> MigrationResult:
256
+ """Method that will be run against non-default branches, it assumes that the branches have been rebased."""
257
+ raise NotImplementedError()
258
+
259
+ async def execute(self, db: InfrahubDatabase) -> MigrationResult:
260
+ """Method that will be run against the default branch."""
261
+ raise NotImplementedError()
262
+
263
+
264
+ MigrationTypes: TypeAlias = GraphMigration | InternalSchemaMigration | ArbitraryMigration | MigrationRequiringRebase
infrahub/core/models.py CHANGED
@@ -404,8 +404,8 @@ class HashableModelDiff(BaseModel):
404
404
  class HashableModel(BaseModel):
405
405
  model_config = ConfigDict(extra="forbid")
406
406
 
407
- id: str | None = None
408
- state: HashableModelState = HashableModelState.PRESENT
407
+ id: str | None = Field(default=None)
408
+ state: HashableModelState = Field(default=HashableModelState.PRESENT)
409
409
 
410
410
  _exclude_from_hash: list[str] = []
411
411
  _sort_by: list[str] = []
@@ -314,7 +314,9 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
314
314
 
315
315
  return cls(**attrs)
316
316
 
317
- async def handle_pool(self, db: InfrahubDatabase, attribute: BaseAttribute, errors: list) -> None:
317
+ async def handle_pool(
318
+ self, db: InfrahubDatabase, attribute: BaseAttribute, errors: list, allocate_resources: bool = True
319
+ ) -> None:
318
320
  """Evaluate if a resource has been requested from a pool and apply the resource
319
321
 
320
322
  This method only works on number pools, currently Integer is the only type that has the from_pool
@@ -325,7 +327,7 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
325
327
  attribute.from_pool = {"id": attribute.schema.parameters.number_pool_id}
326
328
  attribute.is_default = False
327
329
 
328
- if not attribute.from_pool:
330
+ if not attribute.from_pool or not allocate_resources:
329
331
  return
330
332
 
331
333
  try:
@@ -485,7 +487,7 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
485
487
  elif relationship_peers := await relationship.get_peers(db=db):
486
488
  fields[relationship_name] = [{"id": peer_id} for peer_id in relationship_peers]
487
489
 
488
- async def _process_fields(self, fields: dict, db: InfrahubDatabase) -> None:
490
+ async def _process_fields(self, fields: dict, db: InfrahubDatabase, process_pools: bool = True) -> None:
489
491
  errors = []
490
492
 
491
493
  if "_source" in fields.keys():
@@ -539,7 +541,7 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
539
541
  # Generate Attribute and Relationship and assign them
540
542
  # -------------------------------------------
541
543
  errors.extend(await self._process_fields_relationships(fields=fields, db=db))
542
- errors.extend(await self._process_fields_attributes(fields=fields, db=db))
544
+ errors.extend(await self._process_fields_attributes(fields=fields, db=db, process_pools=process_pools))
543
545
 
544
546
  if errors:
545
547
  raise ValidationError(errors)
@@ -576,7 +578,9 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
576
578
 
577
579
  return errors
578
580
 
579
- async def _process_fields_attributes(self, fields: dict, db: InfrahubDatabase) -> list[ValidationError]:
581
+ async def _process_fields_attributes(
582
+ self, fields: dict, db: InfrahubDatabase, process_pools: bool
583
+ ) -> list[ValidationError]:
580
584
  errors: list[ValidationError] = []
581
585
 
582
586
  for attr_schema in self._schema.attributes:
@@ -601,9 +605,10 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
601
605
  )
602
606
  if not self._existing:
603
607
  attribute: BaseAttribute = getattr(self, attr_schema.name)
604
- await self.handle_pool(db=db, attribute=attribute, errors=errors)
608
+ await self.handle_pool(db=db, attribute=attribute, errors=errors, allocate_resources=process_pools)
605
609
 
606
- attribute.validate(value=attribute.value, name=attribute.name, schema=attribute.schema)
610
+ if process_pools or attribute.from_pool is None:
611
+ attribute.validate(value=attribute.value, name=attribute.name, schema=attribute.schema)
607
612
  except ValidationError as exc:
608
613
  errors.append(exc)
609
614
 
@@ -731,7 +736,7 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
731
736
  self.label.value = " ".join([word.title() for word in self.name.value.split("_")])
732
737
  self.label.is_default = False
733
738
 
734
- async def new(self, db: InfrahubDatabase, id: str | None = None, **kwargs: Any) -> Self:
739
+ async def new(self, db: InfrahubDatabase, id: str | None = None, process_pools: bool = True, **kwargs: Any) -> Self:
735
740
  if id and not is_valid_uuid(id):
736
741
  raise ValidationError({"id": f"{id} is not a valid UUID"})
737
742
  if id:
@@ -741,15 +746,40 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
741
746
 
742
747
  self.id = id or str(UUIDT())
743
748
 
744
- await self._process_fields(db=db, fields=kwargs)
749
+ await self._process_fields(db=db, fields=kwargs, process_pools=process_pools)
745
750
  await self._process_macros(db=db)
746
751
 
747
752
  return self
748
753
 
749
754
  async def resolve_relationships(self, db: InfrahubDatabase) -> None:
755
+ extra_filters: dict[str, set[str]] = {}
756
+
757
+ if not self._existing:
758
+ # If we are creating a new node, we need to resolve extra filters from HFID and Display Labels,
759
+ # if we don't do this the fields might be blank
760
+ schema_branch = db.schema.get_schema_branch(name=self.get_branch_based_on_support_type().name)
761
+ try:
762
+ hfid_identifier = schema_branch.hfids.get_node_definition(kind=self._schema.kind)
763
+ for rel_name, attrs in hfid_identifier.relationship_fields.items():
764
+ extra_filters.setdefault(rel_name, set()).update(attrs)
765
+ except KeyError:
766
+ # No HFID defined for this kind
767
+ ...
768
+ try:
769
+ display_label_identifier = schema_branch.display_labels.get_template_node(kind=self._schema.kind)
770
+ for rel_name, attrs in display_label_identifier.relationship_fields.items():
771
+ extra_filters.setdefault(rel_name, set()).update(attrs)
772
+ except KeyError:
773
+ # No Display Label defined for this kind
774
+ ...
775
+
750
776
  for name in self._relationships:
751
777
  relm: RelationshipManager = getattr(self, name)
752
- await relm.resolve(db=db)
778
+ query_filter = []
779
+ if name in extra_filters:
780
+ query_filter.extend(list(extra_filters[name]))
781
+
782
+ await relm.resolve(db=db, fields=query_filter)
753
783
 
754
784
  async def load(
755
785
  self,
@@ -1046,7 +1076,7 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
1046
1076
 
1047
1077
  return response
1048
1078
 
1049
- async def from_graphql(self, data: dict, db: InfrahubDatabase) -> bool:
1079
+ async def from_graphql(self, data: dict, db: InfrahubDatabase, process_pools: bool = True) -> bool:
1050
1080
  """Update object from a GraphQL payload."""
1051
1081
 
1052
1082
  changed = False
@@ -1054,7 +1084,7 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
1054
1084
  for key, value in data.items():
1055
1085
  if key in self._attributes and isinstance(value, dict):
1056
1086
  attribute = getattr(self, key)
1057
- changed |= await attribute.from_graphql(data=value, db=db)
1087
+ changed |= await attribute.from_graphql(data=value, db=db, process_pools=process_pools)
1058
1088
 
1059
1089
  if key in self._relationships:
1060
1090
  rel: RelationshipManager = getattr(self, key)
@@ -7,7 +7,7 @@ from infrahub.core import registry
7
7
  from infrahub.core.constants import RelationshipCardinality, RelationshipKind
8
8
  from infrahub.core.constraint.node.runner import NodeConstraintRunner
9
9
  from infrahub.core.node import Node
10
- from infrahub.core.node.lock_utils import get_kind_lock_names_on_object_mutation
10
+ from infrahub.core.node.lock_utils import get_lock_names_on_object_mutation
11
11
  from infrahub.core.protocols import CoreObjectTemplate
12
12
  from infrahub.core.schema import GenericSchema
13
13
  from infrahub.dependencies.registry import get_component_registry
@@ -62,14 +62,25 @@ async def extract_peer_data(
62
62
 
63
63
  for rel in template_peer.get_schema().relationship_names:
64
64
  rel_manager: RelationshipManager = getattr(template_peer, rel)
65
- if (
66
- rel_manager.schema.kind not in [RelationshipKind.COMPONENT, RelationshipKind.PARENT]
67
- or rel_manager.schema.name not in obj_peer_schema.relationship_names
68
- ):
65
+
66
+ if rel_manager.schema.name not in obj_peer_schema.relationship_names:
69
67
  continue
70
68
 
71
- if list(await rel_manager.get_peers(db=db)) == [current_template.id]:
69
+ peers_map = await rel_manager.get_peers(db=db)
70
+ if rel_manager.schema.kind in [RelationshipKind.COMPONENT, RelationshipKind.PARENT] and list(
71
+ peers_map.keys()
72
+ ) == [current_template.id]:
72
73
  obj_peer_data[rel] = {"id": parent_obj.id}
74
+ continue
75
+
76
+ rel_peer_ids = []
77
+ for peer_id, peer_object in peers_map.items():
78
+ # deeper templates are handled in the next level of recursion
79
+ if peer_object.get_schema().is_template_schema:
80
+ continue
81
+ rel_peer_ids.append({"id": peer_id})
82
+
83
+ obj_peer_data[rel] = rel_peer_ids
73
84
 
74
85
  return obj_peer_data
75
86
 
@@ -160,45 +171,6 @@ async def _do_create_node(
160
171
  return obj
161
172
 
162
173
 
163
- async def _do_create_node_with_lock(
164
- node_class: type[Node],
165
- node_constraint_runner: NodeConstraintRunner,
166
- db: InfrahubDatabase,
167
- schema: NonGenericSchemaTypes,
168
- branch: Branch,
169
- fields_to_validate: list[str],
170
- data: dict[str, Any],
171
- at: Timestamp | None = None,
172
- ) -> Node:
173
- schema_branch = registry.schema.get_schema_branch(name=branch.name)
174
- lock_names = get_kind_lock_names_on_object_mutation(
175
- kind=schema.kind, branch=branch, schema_branch=schema_branch, data=dict(data)
176
- )
177
-
178
- if lock_names:
179
- async with InfrahubMultiLock(lock_registry=lock.registry, locks=lock_names):
180
- return await _do_create_node(
181
- node_class=node_class,
182
- node_constraint_runner=node_constraint_runner,
183
- db=db,
184
- schema=schema,
185
- branch=branch,
186
- fields_to_validate=fields_to_validate,
187
- data=data,
188
- at=at,
189
- )
190
- return await _do_create_node(
191
- node_class=node_class,
192
- node_constraint_runner=node_constraint_runner,
193
- db=db,
194
- schema=schema,
195
- branch=branch,
196
- fields_to_validate=fields_to_validate,
197
- data=data,
198
- at=at,
199
- )
200
-
201
-
202
174
  async def create_node(
203
175
  data: dict[str, Any],
204
176
  db: InfrahubDatabase,
@@ -212,37 +184,48 @@ async def create_node(
212
184
  raise ValueError(f"Node of generic schema `{schema.name=}` can not be instantiated.")
213
185
 
214
186
  component_registry = get_component_registry()
215
- node_constraint_runner = await component_registry.get_component(
216
- NodeConstraintRunner, db=db.start_session() if not db.is_transaction else db, branch=branch
217
- )
218
187
  node_class = Node
219
188
  if schema.kind in registry.node:
220
189
  node_class = registry.node[schema.kind]
221
190
 
222
191
  fields_to_validate = list(data)
223
- if db.is_transaction:
224
- obj = await _do_create_node_with_lock(
225
- node_class=node_class,
226
- node_constraint_runner=node_constraint_runner,
227
- db=db,
228
- schema=schema,
229
- branch=branch,
230
- fields_to_validate=fields_to_validate,
231
- data=data,
232
- at=at,
233
- )
234
- else:
235
- async with db.start_transaction() as dbt:
236
- obj = await _do_create_node_with_lock(
192
+
193
+ preview_obj = await node_class.init(db=db, schema=schema, branch=branch)
194
+ await preview_obj.new(db=db, process_pools=False, **data)
195
+ schema_branch = db.schema.get_schema_branch(name=branch.name)
196
+ lock_names = get_lock_names_on_object_mutation(node=preview_obj, schema_branch=schema_branch)
197
+
198
+ obj: Node
199
+ async with InfrahubMultiLock(lock_registry=lock.registry, locks=lock_names, metrics=False):
200
+ if db.is_transaction:
201
+ node_constraint_runner = await component_registry.get_component(NodeConstraintRunner, db=db, branch=branch)
202
+
203
+ obj = await _do_create_node(
237
204
  node_class=node_class,
238
205
  node_constraint_runner=node_constraint_runner,
239
- db=dbt,
206
+ db=db,
240
207
  schema=schema,
241
208
  branch=branch,
242
209
  fields_to_validate=fields_to_validate,
243
210
  data=data,
244
211
  at=at,
245
212
  )
213
+ else:
214
+ async with db.start_transaction() as dbt:
215
+ node_constraint_runner = await component_registry.get_component(
216
+ NodeConstraintRunner, db=dbt, branch=branch
217
+ )
218
+
219
+ obj = await _do_create_node(
220
+ node_class=node_class,
221
+ node_constraint_runner=node_constraint_runner,
222
+ db=dbt,
223
+ schema=schema,
224
+ branch=branch,
225
+ fields_to_validate=fields_to_validate,
226
+ data=data,
227
+ at=at,
228
+ )
246
229
 
247
230
  if await get_profile_ids(db=db, obj=obj):
248
231
  node_profiles_applier = NodeProfilesApplier(db=db, branch=branch)
@@ -1,12 +1,15 @@
1
1
  import hashlib
2
- from typing import Any
2
+ from typing import TYPE_CHECKING
3
3
 
4
- from infrahub.core.branch import Branch
5
- from infrahub.core.constants.infrahubkind import GENERICGROUP, GRAPHQLQUERYGROUP
4
+ from infrahub.core.node import Node
6
5
  from infrahub.core.schema import GenericSchema
7
6
  from infrahub.core.schema.schema_branch import SchemaBranch
8
7
 
9
- KINDS_CONCURRENT_MUTATIONS_NOT_ALLOWED = [GENERICGROUP]
8
+ if TYPE_CHECKING:
9
+ from infrahub.core.relationship import RelationshipManager
10
+
11
+
12
+ RESOURCE_POOL_LOCK_NAMESPACE = "resource_pool"
10
13
 
11
14
 
12
15
  def _get_kinds_to_lock_on_object_mutation(kind: str, schema_branch: SchemaBranch) -> list[str]:
@@ -43,55 +46,78 @@ def _get_kinds_to_lock_on_object_mutation(kind: str, schema_branch: SchemaBranch
43
46
  return kinds
44
47
 
45
48
 
46
- def _should_kind_be_locked_on_any_branch(kind: str, schema_branch: SchemaBranch) -> bool:
47
- """
48
- Check whether kind or any kind generic is in KINDS_TO_LOCK_ON_ANY_BRANCH.
49
- """
50
-
51
- if kind in KINDS_CONCURRENT_MUTATIONS_NOT_ALLOWED:
52
- return True
53
-
54
- node_schema = schema_branch.get(name=kind, duplicate=False)
55
- if isinstance(node_schema, GenericSchema):
56
- return False
57
-
58
- for generic_kind in node_schema.inherit_from:
59
- if generic_kind in KINDS_CONCURRENT_MUTATIONS_NOT_ALLOWED:
60
- return True
61
- return False
62
-
63
-
64
49
  def _hash(value: str) -> str:
65
50
  # Do not use builtin `hash` for lock names as due to randomization results would differ between
66
51
  # different processes.
67
52
  return hashlib.sha256(value.encode()).hexdigest()
68
53
 
69
54
 
70
- def get_kind_lock_names_on_object_mutation(
71
- kind: str, branch: Branch, schema_branch: SchemaBranch, data: dict[str, Any]
72
- ) -> list[str]:
55
+ def get_lock_names_on_object_mutation(node: Node, schema_branch: SchemaBranch) -> list[str]:
73
56
  """
74
- Return objects kind for which we want to avoid concurrent mutation (create/update). Except for some specific kinds,
75
- concurrent mutations are only allowed on non-main branch as objects validations will be performed at least when merging in main branch.
57
+ Return lock names for object on which we want to avoid concurrent mutation (create/update).
58
+ Lock names include kind, some generic kinds, resource pool ids, and values of attributes of corresponding uniqueness constraints.
76
59
  """
77
60
 
78
- if not branch.is_default and not _should_kind_be_locked_on_any_branch(kind=kind, schema_branch=schema_branch):
79
- return []
80
-
81
- if kind == GRAPHQLQUERYGROUP:
82
- # Lock on name as well to improve performances
83
- try:
84
- name = data["name"].value
85
- return [build_object_lock_name(kind + "." + _hash(name))]
86
- except KeyError:
87
- # We might reach here if we are updating a CoreGraphQLQueryGroup without updating the name,
88
- # in which case we would not need to lock. This is not supposed to happen as current `update`
89
- # logic first fetches the node with its name.
90
- return []
91
-
92
- lock_kinds = _get_kinds_to_lock_on_object_mutation(kind, schema_branch)
93
- lock_names = [build_object_lock_name(kind) for kind in lock_kinds]
94
- return lock_names
61
+ lock_names: set[str] = set()
62
+
63
+ # Check if node is using resource manager allocation via attributes
64
+ for attr_name in node.get_schema().attribute_names:
65
+ attribute = getattr(node, attr_name, None)
66
+ if attribute is not None and getattr(attribute, "from_pool", None) and "id" in attribute.from_pool:
67
+ lock_names.add(f"{RESOURCE_POOL_LOCK_NAMESPACE}.{attribute.from_pool['id']}")
68
+
69
+ # Check if relationships allocate resources
70
+ for rel_name in node._relationships:
71
+ rel_manager: RelationshipManager = getattr(node, rel_name)
72
+ for rel in rel_manager._relationships:
73
+ if rel.from_pool and "id" in rel.from_pool:
74
+ lock_names.add(f"{RESOURCE_POOL_LOCK_NAMESPACE}.{rel.from_pool['id']}")
75
+
76
+ lock_kinds = _get_kinds_to_lock_on_object_mutation(node.get_kind(), schema_branch)
77
+ for kind in lock_kinds:
78
+ schema = schema_branch.get(name=kind, duplicate=False)
79
+ ucs = schema.uniqueness_constraints
80
+ if ucs is None:
81
+ continue
82
+
83
+ ucs_lock_names: list[str] = []
84
+ uc_attributes_names = set()
85
+
86
+ for uc in ucs:
87
+ uc_attributes_values = []
88
+ # Keep only attributes constraints
89
+ for field_path in uc:
90
+ # Some attributes may exist in different uniqueness constraints, we de-duplicate them
91
+ if field_path in uc_attributes_names:
92
+ continue
93
+
94
+ # Exclude relationships uniqueness constraints
95
+ schema_path = schema.parse_schema_path(path=field_path, schema=schema_branch)
96
+ if schema_path.related_schema is not None or schema_path.attribute_schema is None:
97
+ continue
98
+
99
+ uc_attributes_names.add(field_path)
100
+ attr = getattr(node, schema_path.attribute_schema.name, None)
101
+ if attr is None or attr.value is None:
102
+ # `attr.value` being None corresponds to optional unique attribute.
103
+ # `attr` being None is not supposed to happen.
104
+ value_hashed = _hash("")
105
+ else:
106
+ value_hashed = _hash(str(attr.value))
107
+
108
+ uc_attributes_values.append(value_hashed)
109
+
110
+ if uc_attributes_values:
111
+ uc_lock_name = ".".join(uc_attributes_values)
112
+ ucs_lock_names.append(uc_lock_name)
113
+
114
+ if not ucs_lock_names:
115
+ continue
116
+
117
+ partial_lock_name = kind + "." + ".".join(ucs_lock_names)
118
+ lock_names.add(build_object_lock_name(partial_lock_name))
119
+
120
+ return sorted(lock_names)
95
121
 
96
122
 
97
123
  def build_object_lock_name(name: str) -> str:
@@ -15,6 +15,7 @@ from infrahub.exceptions import PoolExhaustedError, ValidationError
15
15
  from infrahub.pools.address import get_available
16
16
 
17
17
  from .. import Node
18
+ from ..lock_utils import RESOURCE_POOL_LOCK_NAMESPACE
18
19
 
19
20
  if TYPE_CHECKING:
20
21
  from infrahub.core.branch import Branch
@@ -34,7 +35,7 @@ class CoreIPAddressPool(Node):
34
35
  prefixlen: int | None = None,
35
36
  at: Timestamp | None = None,
36
37
  ) -> Node:
37
- async with lock.registry.get(name=self.get_id(), namespace="resource_pool"):
38
+ async with lock.registry.get(name=self.get_id(), namespace=RESOURCE_POOL_LOCK_NAMESPACE):
38
39
  # Check if there is already a resource allocated with this identifier
39
40
  # if not, pull all existing prefixes and allocated the next available
40
41
 
@@ -17,6 +17,7 @@ from infrahub.exceptions import ValidationError
17
17
  from infrahub.pools.prefix import get_next_available_prefix
18
18
 
19
19
  from .. import Node
20
+ from ..lock_utils import RESOURCE_POOL_LOCK_NAMESPACE
20
21
 
21
22
  if TYPE_CHECKING:
22
23
  from infrahub.core.branch import Branch
@@ -37,7 +38,7 @@ class CoreIPPrefixPool(Node):
37
38
  prefix_type: str | None = None,
38
39
  at: Timestamp | None = None,
39
40
  ) -> Node:
40
- async with lock.registry.get(name=self.get_id(), namespace="resource_pool"):
41
+ async with lock.registry.get(name=self.get_id(), namespace=RESOURCE_POOL_LOCK_NAMESPACE):
41
42
  # Check if there is already a resource allocated with this identifier
42
43
  # if not, pull all existing prefixes and allocated the next available
43
44
  if identifier:
@@ -9,6 +9,7 @@ from infrahub.core.schema.attribute_parameters import NumberAttributeParameters
9
9
  from infrahub.exceptions import PoolExhaustedError
10
10
 
11
11
  from .. import Node
12
+ from ..lock_utils import RESOURCE_POOL_LOCK_NAMESPACE
12
13
 
13
14
  if TYPE_CHECKING:
14
15
  from infrahub.core.branch import Branch
@@ -63,7 +64,7 @@ class CoreNumberPool(Node):
63
64
  identifier: str | None = None,
64
65
  at: Timestamp | None = None,
65
66
  ) -> int:
66
- async with lock.registry.get(name=self.get_id(), namespace="resource_pool"):
67
+ async with lock.registry.get(name=self.get_id(), namespace=RESOURCE_POOL_LOCK_NAMESPACE):
67
68
  # NOTE: ideally we should use the HFID as the identifier (if available)
68
69
  # one of the challenge with using the HFID is that it might change over time
69
70
  # so we need to ensure that the identifier is stable, or we need to handle the case where the identifier changes
@@ -184,6 +184,61 @@ class AttributeUpdateNodePropertyQuery(AttributeQuery):
184
184
  self.return_labels = ["a", "np", "r"]
185
185
 
186
186
 
187
class AttributeClearNodePropertyQuery(AttributeQuery):
    """Write query that clears (soft-deletes) one node-property edge of an attribute.

    Given an attribute and the uuid of a property node (e.g. a source/owner node),
    this query removes the currently visible ``HAS_<PROP>`` relationship between
    the attribute and that node, in a branch-aware way:

    - if the visible edge lives on the current branch, it is closed by setting
      its ``to`` time to ``at``;
    - if the visible edge comes from a lower branch level (i.e. it is inherited
      from a parent branch), a new shadowing edge with ``status: "deleted"`` is
      created on the current branch instead.
    """

    name = "attribute_clear_node_property"
    type: QueryType = QueryType.WRITE
    # The query performs only SET/CREATE; no RETURN clause is appended.
    insert_return: bool = False

    def __init__(
        self,
        prop_name: str,
        prop_id: str | None = None,
        **kwargs: Any,
    ):
        """
        Args:
            prop_name: Name of the node property to clear; used to build the
                relationship label ``HAS_<PROP_NAME upper-cased>``.
            prop_id: UUID of the property node whose edge should be cleared.
            **kwargs: Forwarded to ``AttributeQuery`` (expects at least ``attr``,
                ``branch`` and optionally ``at`` — presumed from usage below;
                confirm against the base class).
        """
        self.prop_name = prop_name
        self.prop_id = prop_id

        super().__init__(**kwargs)

    async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:  # noqa: ARG002
        # Prefer an explicitly provided timestamp, falling back to the attribute's own.
        at = self.at or self.attr.at

        # Branch-visibility filter over relationship edges (branch + time window).
        branch_filter, branch_params = self.branch.get_query_filter_path(at=at)
        self.params.update(branch_params)
        self.params["attr_uuid"] = self.attr.id
        self.params["branch"] = self.branch.name
        self.params["branch_level"] = self.branch.hierarchy_level
        self.params["at"] = at.to_string()
        self.params["prop_name"] = self.prop_name
        # NOTE: $prop_name is not referenced in the Cypher below (the property name
        # is baked into the relationship label); it is passed along for completeness.
        self.params["prop_id"] = self.prop_id

        # The relationship type cannot be parameterized in Cypher, so the label is
        # interpolated into the query text. prop_name is an internal schema name,
        # not user input.
        rel_label = f"HAS_{self.prop_name.upper()}"
        # Step 1: find the single latest edge visible from this branch (highest
        #         branch_level, most recent `from`, preferring "active" status).
        # Step 2: only proceed if that edge is active (i.e. not already deleted).
        # Step 3a: same-branch edge -> close it in place by setting `to`.
        # Step 3b: inherited edge (lower branch_level) -> create a "deleted"
        #          shadow edge on the current branch at time `at`.
        query = """
        MATCH (a:Attribute { uuid: $attr_uuid })-[r:%(rel_label)s]->(np:Node { uuid: $prop_id })
        WITH DISTINCT a, np
        CALL (a, np) {
            MATCH (a)-[r:%(rel_label)s]->(np)
            WHERE %(branch_filter)s
            ORDER BY r.branch_level DESC, r.from DESC, r.status ASC
            LIMIT 1
            RETURN r AS property_edge
        }
        WITH a, np, property_edge
        WHERE property_edge.status = "active"
        CALL (property_edge) {
            WITH property_edge
            WHERE property_edge.branch = $branch
            SET property_edge.to = $at
        }
        CALL (a, np, property_edge) {
            WITH property_edge
            WHERE property_edge.branch_level < $branch_level
            CREATE (a)-[r:%(rel_label)s { branch: $branch, branch_level: $branch_level, status: "deleted", from: $at }]->(np)
        }
        """ % {"branch_filter": branch_filter, "rel_label": rel_label}
        self.add_to_query(query)
187
242
  class AttributeGetQuery(AttributeQuery):
188
243
  name = "attribute_get"
189
244
  type: QueryType = QueryType.READ