infrahub-server 1.5.0b1-py3-none-any.whl → 1.5.0b2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (118)
  1. infrahub/api/internal.py +2 -0
  2. infrahub/api/oauth2.py +13 -19
  3. infrahub/api/oidc.py +15 -21
  4. infrahub/api/schema.py +24 -3
  5. infrahub/artifacts/models.py +2 -1
  6. infrahub/auth.py +137 -3
  7. infrahub/cli/__init__.py +2 -0
  8. infrahub/cli/db.py +83 -102
  9. infrahub/cli/dev.py +118 -0
  10. infrahub/cli/tasks.py +46 -0
  11. infrahub/cli/upgrade.py +30 -3
  12. infrahub/computed_attribute/tasks.py +20 -8
  13. infrahub/core/attribute.py +10 -2
  14. infrahub/core/branch/enums.py +1 -1
  15. infrahub/core/branch/models.py +7 -3
  16. infrahub/core/branch/tasks.py +68 -7
  17. infrahub/core/constants/__init__.py +3 -0
  18. infrahub/core/diff/query/artifact.py +1 -0
  19. infrahub/core/diff/query/field_summary.py +1 -0
  20. infrahub/core/graph/__init__.py +1 -1
  21. infrahub/core/initialization.py +5 -2
  22. infrahub/core/migrations/__init__.py +3 -0
  23. infrahub/core/migrations/exceptions.py +4 -0
  24. infrahub/core/migrations/graph/__init__.py +10 -13
  25. infrahub/core/migrations/graph/load_schema_branch.py +21 -0
  26. infrahub/core/migrations/graph/m013_convert_git_password_credential.py +1 -1
  27. infrahub/core/migrations/graph/m040_duplicated_attributes.py +81 -0
  28. infrahub/core/migrations/graph/m041_profile_attrs_in_db.py +145 -0
  29. infrahub/core/migrations/graph/m042_create_hfid_display_label_in_db.py +164 -0
  30. infrahub/core/migrations/graph/m043_backfill_hfid_display_label_in_db.py +866 -0
  31. infrahub/core/migrations/query/__init__.py +7 -8
  32. infrahub/core/migrations/query/attribute_add.py +8 -6
  33. infrahub/core/migrations/query/attribute_remove.py +134 -0
  34. infrahub/core/migrations/runner.py +54 -0
  35. infrahub/core/migrations/schema/attribute_kind_update.py +9 -3
  36. infrahub/core/migrations/schema/attribute_supports_profile.py +90 -0
  37. infrahub/core/migrations/schema/node_attribute_add.py +30 -2
  38. infrahub/core/migrations/schema/node_attribute_remove.py +13 -109
  39. infrahub/core/migrations/schema/node_kind_update.py +2 -1
  40. infrahub/core/migrations/schema/node_remove.py +2 -1
  41. infrahub/core/migrations/schema/placeholder_dummy.py +3 -2
  42. infrahub/core/migrations/shared.py +48 -14
  43. infrahub/core/node/__init__.py +16 -11
  44. infrahub/core/node/create.py +46 -63
  45. infrahub/core/node/lock_utils.py +70 -44
  46. infrahub/core/node/resource_manager/ip_address_pool.py +2 -1
  47. infrahub/core/node/resource_manager/ip_prefix_pool.py +2 -1
  48. infrahub/core/node/resource_manager/number_pool.py +2 -1
  49. infrahub/core/query/attribute.py +55 -0
  50. infrahub/core/query/ipam.py +1 -0
  51. infrahub/core/query/node.py +9 -3
  52. infrahub/core/query/relationship.py +1 -0
  53. infrahub/core/schema/__init__.py +56 -0
  54. infrahub/core/schema/attribute_schema.py +4 -0
  55. infrahub/core/schema/definitions/internal.py +2 -2
  56. infrahub/core/schema/generated/attribute_schema.py +2 -2
  57. infrahub/core/schema/manager.py +22 -1
  58. infrahub/core/schema/schema_branch.py +180 -22
  59. infrahub/database/graph.py +21 -0
  60. infrahub/display_labels/tasks.py +13 -7
  61. infrahub/events/branch_action.py +27 -1
  62. infrahub/generators/tasks.py +3 -7
  63. infrahub/git/base.py +4 -1
  64. infrahub/git/integrator.py +1 -1
  65. infrahub/git/models.py +2 -1
  66. infrahub/git/repository.py +22 -5
  67. infrahub/git/tasks.py +66 -10
  68. infrahub/git/utils.py +123 -1
  69. infrahub/graphql/api/endpoints.py +14 -4
  70. infrahub/graphql/manager.py +4 -9
  71. infrahub/graphql/mutations/convert_object_type.py +11 -1
  72. infrahub/graphql/mutations/display_label.py +17 -10
  73. infrahub/graphql/mutations/hfid.py +17 -10
  74. infrahub/graphql/mutations/ipam.py +54 -35
  75. infrahub/graphql/mutations/main.py +27 -28
  76. infrahub/graphql/schema_sort.py +170 -0
  77. infrahub/graphql/types/branch.py +4 -1
  78. infrahub/graphql/types/enums.py +3 -0
  79. infrahub/hfid/tasks.py +13 -7
  80. infrahub/lock.py +52 -12
  81. infrahub/message_bus/types.py +2 -1
  82. infrahub/permissions/constants.py +2 -0
  83. infrahub/proposed_change/tasks.py +25 -16
  84. infrahub/server.py +6 -2
  85. infrahub/services/__init__.py +2 -2
  86. infrahub/services/adapters/http/__init__.py +5 -0
  87. infrahub/services/adapters/workflow/worker.py +14 -3
  88. infrahub/task_manager/event.py +5 -0
  89. infrahub/task_manager/models.py +7 -0
  90. infrahub/task_manager/task.py +73 -0
  91. infrahub/trigger/setup.py +13 -4
  92. infrahub/trigger/tasks.py +3 -0
  93. infrahub/workers/dependencies.py +10 -1
  94. infrahub/workers/infrahub_async.py +10 -2
  95. infrahub/workflows/catalogue.py +8 -0
  96. infrahub/workflows/initialization.py +5 -0
  97. infrahub/workflows/utils.py +2 -1
  98. infrahub_sdk/client.py +13 -10
  99. infrahub_sdk/config.py +29 -2
  100. infrahub_sdk/ctl/schema.py +22 -7
  101. infrahub_sdk/schema/__init__.py +32 -4
  102. infrahub_sdk/spec/models.py +7 -0
  103. infrahub_sdk/spec/object.py +37 -102
  104. infrahub_sdk/spec/processors/__init__.py +0 -0
  105. infrahub_sdk/spec/processors/data_processor.py +10 -0
  106. infrahub_sdk/spec/processors/factory.py +34 -0
  107. infrahub_sdk/spec/processors/range_expand_processor.py +56 -0
  108. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.0b2.dist-info}/METADATA +3 -1
  109. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.0b2.dist-info}/RECORD +115 -101
  110. infrahub_testcontainers/container.py +114 -2
  111. infrahub_testcontainers/docker-compose-cluster.test.yml +5 -0
  112. infrahub_testcontainers/docker-compose.test.yml +5 -0
  113. infrahub/core/migrations/graph/m040_profile_attrs_in_db.py +0 -166
  114. infrahub/core/migrations/graph/m041_create_hfid_display_label_in_db.py +0 -97
  115. infrahub/core/migrations/graph/m042_backfill_hfid_display_label_in_db.py +0 -86
  116. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.0b2.dist-info}/LICENSE.txt +0 -0
  117. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.0b2.dist-info}/WHEEL +0 -0
  118. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.0b2.dist-info}/entry_points.txt +0 -0
infrahub/core/migrations/shared.py +48 -14
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from typing import TYPE_CHECKING, Any, Sequence
+from typing import TYPE_CHECKING, Any, Sequence, TypeAlias
 
 from pydantic import BaseModel, ConfigDict, Field
 from typing_extensions import Self
@@ -8,16 +8,10 @@ from typing_extensions import Self
 from infrahub.core import registry
 from infrahub.core.path import SchemaPath  # noqa: TC001
 from infrahub.core.query import Query  # noqa: TC001
-from infrahub.core.schema import (
-    AttributeSchema,
-    MainSchemaTypes,
-    RelationshipSchema,
-    SchemaRoot,
-    internal_schema,
-)
+from infrahub.core.schema import AttributeSchema, MainSchemaTypes, RelationshipSchema, SchemaRoot, internal_schema
 from infrahub.core.timestamp import Timestamp
 
-from .query import MigrationQuery  # noqa: TC001
+from .query import MigrationBaseQuery  # noqa: TC001
 
 if TYPE_CHECKING:
     from infrahub.core.branch import Branch
@@ -40,7 +34,9 @@ class MigrationResult(BaseModel):
 class SchemaMigration(BaseModel):
     model_config = ConfigDict(arbitrary_types_allowed=True)
     name: str = Field(..., description="Name of the migration")
-    queries: Sequence[type[MigrationQuery]] = Field(..., description="List of queries to execute for this migration")
+    queries: Sequence[type[MigrationBaseQuery]] = Field(
+        ..., description="List of queries to execute for this migration"
+    )
 
     new_node_schema: MainSchemaTypes | None = None
     previous_node_schema: MainSchemaTypes | None = None
@@ -65,9 +61,14 @@ class SchemaMigration(BaseModel):
         return result
 
     async def execute_queries(
-        self, db: InfrahubDatabase, result: MigrationResult, branch: Branch, at: Timestamp
+        self,
+        db: InfrahubDatabase,
+        result: MigrationResult,
+        branch: Branch,
+        at: Timestamp,
+        queries: Sequence[type[MigrationBaseQuery]],
     ) -> MigrationResult:
-        for migration_query in self.queries:
+        for migration_query in queries:
             try:
                 query = await migration_query.init(db=db, branch=branch, at=at, migration=self)
                 await query.execute(db=db)
@@ -78,13 +79,20 @@ class SchemaMigration(BaseModel):
 
         return result
 
-    async def execute(self, db: InfrahubDatabase, branch: Branch, at: Timestamp | str | None = None) -> MigrationResult:
+    async def execute(
+        self,
+        db: InfrahubDatabase,
+        branch: Branch,
+        at: Timestamp | str | None = None,
+        queries: Sequence[type[MigrationBaseQuery]] | None = None,
+    ) -> MigrationResult:
         async with db.start_transaction() as ts:
             result = MigrationResult()
             at = Timestamp(at)
 
             await self.execute_pre_queries(db=ts, result=result, branch=branch, at=at)
-            await self.execute_queries(db=ts, result=result, branch=branch, at=at)
+            queries_to_execute = queries or self.queries
+            await self.execute_queries(db=ts, result=result, branch=branch, at=at, queries=queries_to_execute)
             await self.execute_post_queries(db=ts, result=result, branch=branch, at=at)
 
         return result
@@ -103,6 +111,8 @@ class SchemaMigration(BaseModel):
 
 
 class AttributeSchemaMigration(SchemaMigration):
+    uuids: list[str] | None = None
+
     @property
     def new_attribute_schema(self) -> AttributeSchema:
         if not self.schema_path.field_name:
@@ -214,3 +224,27 @@ class ArbitraryMigration(BaseModel):
 
     async def execute(self, db: InfrahubDatabase) -> MigrationResult:
         raise NotImplementedError()
+
+
+class MigrationRequiringRebase(BaseModel):
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+    name: str = Field(..., description="Name of the migration")
+    minimum_version: int = Field(..., description="Minimum version of the graph to execute this migration")
+
+    @classmethod
+    def init(cls, **kwargs: dict[str, Any]) -> Self:
+        return cls(**kwargs)  # type: ignore[arg-type]
+
+    async def validate_migration(self, db: InfrahubDatabase) -> MigrationResult:
+        raise NotImplementedError()
+
+    async def execute_against_branch(self, db: InfrahubDatabase, branch: Branch) -> MigrationResult:
+        """Method that will be run against non-default branches, it assumes that the branches have been rebased."""
+        raise NotImplementedError()
+
+    async def execute(self, db: InfrahubDatabase) -> MigrationResult:
+        """Method that will be run against the default branch."""
+        raise NotImplementedError()
+
+
+MigrationTypes: TypeAlias = GraphMigration | InternalSchemaMigration | ArbitraryMigration | MigrationRequiringRebase
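
Note: SchemaMigration.execute() now accepts an optional queries argument that overrides self.queries, so callers can run a subset of a migration's queries. A minimal sketch of the call shape, assuming migration, db, and branch come from the usual Infrahub context (the helper name is illustrative, not part of the package):

async def run_first_query_only(migration, db, branch):
    # queries=None (the default) keeps the old behaviour and runs
    # migration.queries in full; an explicit sequence restricts execution.
    return await migration.execute(db=db, branch=branch, queries=migration.queries[:1])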
infrahub/core/node/__init__.py +16 -11
@@ -314,7 +314,9 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
 
         return cls(**attrs)
 
-    async def handle_pool(self, db: InfrahubDatabase, attribute: BaseAttribute, errors: list) -> None:
+    async def handle_pool(
+        self, db: InfrahubDatabase, attribute: BaseAttribute, errors: list, allocate_resources: bool = True
+    ) -> None:
         """Evaluate if a resource has been requested from a pool and apply the resource
 
         This method only works on number pools, currently Integer is the only type that has the from_pool
@@ -325,7 +327,7 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
             attribute.from_pool = {"id": attribute.schema.parameters.number_pool_id}
             attribute.is_default = False
 
-        if not attribute.from_pool:
+        if not attribute.from_pool or not allocate_resources:
             return
 
         try:
@@ -485,7 +487,7 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
         elif relationship_peers := await relationship.get_peers(db=db):
             fields[relationship_name] = [{"id": peer_id} for peer_id in relationship_peers]
 
-    async def _process_fields(self, fields: dict, db: InfrahubDatabase) -> None:
+    async def _process_fields(self, fields: dict, db: InfrahubDatabase, process_pools: bool = True) -> None:
         errors = []
 
         if "_source" in fields.keys():
@@ -539,7 +541,7 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
         # Generate Attribute and Relationship and assign them
         # -------------------------------------------
         errors.extend(await self._process_fields_relationships(fields=fields, db=db))
-        errors.extend(await self._process_fields_attributes(fields=fields, db=db))
+        errors.extend(await self._process_fields_attributes(fields=fields, db=db, process_pools=process_pools))
 
         if errors:
             raise ValidationError(errors)
@@ -576,7 +578,9 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
 
         return errors
 
-    async def _process_fields_attributes(self, fields: dict, db: InfrahubDatabase) -> list[ValidationError]:
+    async def _process_fields_attributes(
+        self, fields: dict, db: InfrahubDatabase, process_pools: bool
+    ) -> list[ValidationError]:
         errors: list[ValidationError] = []
 
         for attr_schema in self._schema.attributes:
@@ -601,9 +605,10 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
                 )
                 if not self._existing:
                     attribute: BaseAttribute = getattr(self, attr_schema.name)
-                    await self.handle_pool(db=db, attribute=attribute, errors=errors)
+                    await self.handle_pool(db=db, attribute=attribute, errors=errors, allocate_resources=process_pools)
 
-                    attribute.validate(value=attribute.value, name=attribute.name, schema=attribute.schema)
+                    if process_pools or attribute.from_pool is None:
+                        attribute.validate(value=attribute.value, name=attribute.name, schema=attribute.schema)
             except ValidationError as exc:
                 errors.append(exc)
 
@@ -731,7 +736,7 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
             self.label.value = " ".join([word.title() for word in self.name.value.split("_")])
             self.label.is_default = False
 
-    async def new(self, db: InfrahubDatabase, id: str | None = None, **kwargs: Any) -> Self:
+    async def new(self, db: InfrahubDatabase, id: str | None = None, process_pools: bool = True, **kwargs: Any) -> Self:
         if id and not is_valid_uuid(id):
            raise ValidationError({"id": f"{id} is not a valid UUID"})
         if id:
@@ -741,7 +746,7 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
 
         self.id = id or str(UUIDT())
 
-        await self._process_fields(db=db, fields=kwargs)
+        await self._process_fields(db=db, fields=kwargs, process_pools=process_pools)
         await self._process_macros(db=db)
 
         return self
@@ -1046,7 +1051,7 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
 
         return response
 
-    async def from_graphql(self, data: dict, db: InfrahubDatabase) -> bool:
+    async def from_graphql(self, data: dict, db: InfrahubDatabase, process_pools: bool = True) -> bool:
         """Update object from a GraphQL payload."""
 
         changed = False
@@ -1054,7 +1059,7 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
         for key, value in data.items():
             if key in self._attributes and isinstance(value, dict):
                 attribute = getattr(self, key)
-                changed |= await attribute.from_graphql(data=value, db=db)
+                changed |= await attribute.from_graphql(data=value, db=db, process_pools=process_pools)
 
             if key in self._relationships:
                 rel: RelationshipManager = getattr(self, key)
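
Note: the process_pools flag threads from Node.new() through _process_fields() down to handle_pool(), so a node can be fully populated in memory without allocating anything from a resource pool (values that would only exist after allocation are also exempted from validation). A minimal sketch, assuming db, schema, branch, and data are available from the calling context:

from infrahub.core.node import Node

async def preview_node(db, schema, branch, data: dict):
    node = await Node.init(db=db, schema=schema, branch=branch)
    # process_pools=False keeps from_pool markers but consumes no resources
    await node.new(db=db, process_pools=False, **data)
    return node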
infrahub/core/node/create.py +46 -63
@@ -7,7 +7,7 @@ from infrahub.core import registry
 from infrahub.core.constants import RelationshipCardinality, RelationshipKind
 from infrahub.core.constraint.node.runner import NodeConstraintRunner
 from infrahub.core.node import Node
-from infrahub.core.node.lock_utils import get_kind_lock_names_on_object_mutation
+from infrahub.core.node.lock_utils import get_lock_names_on_object_mutation
 from infrahub.core.protocols import CoreObjectTemplate
 from infrahub.core.schema import GenericSchema
 from infrahub.dependencies.registry import get_component_registry
@@ -62,14 +62,25 @@ async def extract_peer_data(
 
     for rel in template_peer.get_schema().relationship_names:
         rel_manager: RelationshipManager = getattr(template_peer, rel)
-        if (
-            rel_manager.schema.kind not in [RelationshipKind.COMPONENT, RelationshipKind.PARENT]
-            or rel_manager.schema.name not in obj_peer_schema.relationship_names
-        ):
+
+        if rel_manager.schema.name not in obj_peer_schema.relationship_names:
             continue
 
-        if list(await rel_manager.get_peers(db=db)) == [current_template.id]:
+        peers_map = await rel_manager.get_peers(db=db)
+        if rel_manager.schema.kind in [RelationshipKind.COMPONENT, RelationshipKind.PARENT] and list(
+            peers_map.keys()
+        ) == [current_template.id]:
             obj_peer_data[rel] = {"id": parent_obj.id}
+            continue
+
+        rel_peer_ids = []
+        for peer_id, peer_object in peers_map.items():
+            # deeper templates are handled in the next level of recursion
+            if peer_object.get_schema().is_template_schema:
+                continue
+            rel_peer_ids.append({"id": peer_id})
+
+        obj_peer_data[rel] = rel_peer_ids
 
     return obj_peer_data
 
@@ -160,45 +171,6 @@ async def _do_create_node(
     return obj
 
 
-async def _do_create_node_with_lock(
-    node_class: type[Node],
-    node_constraint_runner: NodeConstraintRunner,
-    db: InfrahubDatabase,
-    schema: NonGenericSchemaTypes,
-    branch: Branch,
-    fields_to_validate: list[str],
-    data: dict[str, Any],
-    at: Timestamp | None = None,
-) -> Node:
-    schema_branch = registry.schema.get_schema_branch(name=branch.name)
-    lock_names = get_kind_lock_names_on_object_mutation(
-        kind=schema.kind, branch=branch, schema_branch=schema_branch, data=dict(data)
-    )
-
-    if lock_names:
-        async with InfrahubMultiLock(lock_registry=lock.registry, locks=lock_names):
-            return await _do_create_node(
-                node_class=node_class,
-                node_constraint_runner=node_constraint_runner,
-                db=db,
-                schema=schema,
-                branch=branch,
-                fields_to_validate=fields_to_validate,
-                data=data,
-                at=at,
-            )
-    return await _do_create_node(
-        node_class=node_class,
-        node_constraint_runner=node_constraint_runner,
-        db=db,
-        schema=schema,
-        branch=branch,
-        fields_to_validate=fields_to_validate,
-        data=data,
-        at=at,
-    )
-
-
 async def create_node(
     data: dict[str, Any],
     db: InfrahubDatabase,
@@ -212,37 +184,48 @@ async def create_node(
         raise ValueError(f"Node of generic schema `{schema.name=}` can not be instantiated.")
 
     component_registry = get_component_registry()
-    node_constraint_runner = await component_registry.get_component(
-        NodeConstraintRunner, db=db.start_session() if not db.is_transaction else db, branch=branch
-    )
     node_class = Node
     if schema.kind in registry.node:
         node_class = registry.node[schema.kind]
 
     fields_to_validate = list(data)
-    if db.is_transaction:
-        obj = await _do_create_node_with_lock(
-            node_class=node_class,
-            node_constraint_runner=node_constraint_runner,
-            db=db,
-            schema=schema,
-            branch=branch,
-            fields_to_validate=fields_to_validate,
-            data=data,
-            at=at,
-        )
-    else:
-        async with db.start_transaction() as dbt:
-            obj = await _do_create_node_with_lock(
+
+    preview_obj = await node_class.init(db=db, schema=schema, branch=branch)
+    await preview_obj.new(db=db, process_pools=False, **data)
+    schema_branch = db.schema.get_schema_branch(name=branch.name)
+    lock_names = get_lock_names_on_object_mutation(node=preview_obj, schema_branch=schema_branch)
+
+    obj: Node
+    async with InfrahubMultiLock(lock_registry=lock.registry, locks=lock_names, metrics=False):
+        if db.is_transaction:
+            node_constraint_runner = await component_registry.get_component(NodeConstraintRunner, db=db, branch=branch)
+
+            obj = await _do_create_node(
                 node_class=node_class,
                 node_constraint_runner=node_constraint_runner,
-                db=dbt,
+                db=db,
                 schema=schema,
                 branch=branch,
                 fields_to_validate=fields_to_validate,
                 data=data,
                 at=at,
             )
+        else:
+            async with db.start_transaction() as dbt:
+                node_constraint_runner = await component_registry.get_component(
+                    NodeConstraintRunner, db=dbt, branch=branch
+                )
+
+                obj = await _do_create_node(
+                    node_class=node_class,
+                    node_constraint_runner=node_constraint_runner,
+                    db=dbt,
+                    schema=schema,
+                    branch=branch,
+                    fields_to_validate=fields_to_validate,
+                    data=data,
+                    at=at,
+                )
 
     if await get_profile_ids(db=db, obj=obj):
         node_profiles_applier = NodeProfilesApplier(db=db, branch=branch)
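
Note: create_node() now builds exactly this kind of preview node first, derives the lock names from it with get_lock_names_on_object_mutation(), and only then runs constraints and the actual create under InfrahubMultiLock. A hedged sketch of a caller, with a hypothetical payload:

from infrahub.core.node.create import create_node

async def create_example(db, branch, schema):
    data = {"name": "example-01"}  # hypothetical payload
    return await create_node(data=data, db=db, branch=branch, schema=schema)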
infrahub/core/node/lock_utils.py +70 -44
@@ -1,12 +1,15 @@
 import hashlib
-from typing import Any
+from typing import TYPE_CHECKING
 
-from infrahub.core.branch import Branch
-from infrahub.core.constants.infrahubkind import GENERICGROUP, GRAPHQLQUERYGROUP
+from infrahub.core.node import Node
 from infrahub.core.schema import GenericSchema
 from infrahub.core.schema.schema_branch import SchemaBranch
 
-KINDS_CONCURRENT_MUTATIONS_NOT_ALLOWED = [GENERICGROUP]
+if TYPE_CHECKING:
+    from infrahub.core.relationship import RelationshipManager
+
+
+RESOURCE_POOL_LOCK_NAMESPACE = "resource_pool"
 
 
 def _get_kinds_to_lock_on_object_mutation(kind: str, schema_branch: SchemaBranch) -> list[str]:
@@ -43,55 +46,78 @@ def _get_kinds_to_lock_on_object_mutation(kind: str, schema_branch: SchemaBranch
     return kinds
 
 
-def _should_kind_be_locked_on_any_branch(kind: str, schema_branch: SchemaBranch) -> bool:
-    """
-    Check whether kind or any kind generic is in KINDS_TO_LOCK_ON_ANY_BRANCH.
-    """
-
-    if kind in KINDS_CONCURRENT_MUTATIONS_NOT_ALLOWED:
-        return True
-
-    node_schema = schema_branch.get(name=kind, duplicate=False)
-    if isinstance(node_schema, GenericSchema):
-        return False
-
-    for generic_kind in node_schema.inherit_from:
-        if generic_kind in KINDS_CONCURRENT_MUTATIONS_NOT_ALLOWED:
-            return True
-    return False
-
-
 def _hash(value: str) -> str:
     # Do not use builtin `hash` for lock names as due to randomization results would differ between
     # different processes.
     return hashlib.sha256(value.encode()).hexdigest()
 
 
-def get_kind_lock_names_on_object_mutation(
-    kind: str, branch: Branch, schema_branch: SchemaBranch, data: dict[str, Any]
-) -> list[str]:
+def get_lock_names_on_object_mutation(node: Node, schema_branch: SchemaBranch) -> list[str]:
     """
-    Return objects kind for which we want to avoid concurrent mutation (create/update). Except for some specific kinds,
-    concurrent mutations are only allowed on non-main branch as objects validations will be performed at least when merging in main branch.
+    Return lock names for object on which we want to avoid concurrent mutation (create/update).
+    Lock names include kind, some generic kinds, resource pool ids, and values of attributes of corresponding uniqueness constraints.
     """
 
-    if not branch.is_default and not _should_kind_be_locked_on_any_branch(kind=kind, schema_branch=schema_branch):
-        return []
-
-    if kind == GRAPHQLQUERYGROUP:
-        # Lock on name as well to improve performances
-        try:
-            name = data["name"].value
-            return [build_object_lock_name(kind + "." + _hash(name))]
-        except KeyError:
-            # We might reach here if we are updating a CoreGraphQLQueryGroup without updating the name,
-            # in which case we would not need to lock. This is not supposed to happen as current `update`
-            # logic first fetches the node with its name.
-            return []
-
-    lock_kinds = _get_kinds_to_lock_on_object_mutation(kind, schema_branch)
-    lock_names = [build_object_lock_name(kind) for kind in lock_kinds]
-    return lock_names
+    lock_names: set[str] = set()
+
+    # Check if node is using resource manager allocation via attributes
+    for attr_name in node.get_schema().attribute_names:
+        attribute = getattr(node, attr_name, None)
+        if attribute is not None and getattr(attribute, "from_pool", None) and "id" in attribute.from_pool:
+            lock_names.add(f"{RESOURCE_POOL_LOCK_NAMESPACE}.{attribute.from_pool['id']}")
+
+    # Check if relationships allocate resources
+    for rel_name in node._relationships:
+        rel_manager: RelationshipManager = getattr(node, rel_name)
+        for rel in rel_manager._relationships:
+            if rel.from_pool and "id" in rel.from_pool:
+                lock_names.add(f"{RESOURCE_POOL_LOCK_NAMESPACE}.{rel.from_pool['id']}")
+
+    lock_kinds = _get_kinds_to_lock_on_object_mutation(node.get_kind(), schema_branch)
+    for kind in lock_kinds:
+        schema = schema_branch.get(name=kind, duplicate=False)
+        ucs = schema.uniqueness_constraints
+        if ucs is None:
+            continue
+
+        ucs_lock_names: list[str] = []
+        uc_attributes_names = set()
+
+        for uc in ucs:
+            uc_attributes_values = []
+            # Keep only attributes constraints
+            for field_path in uc:
+                # Some attributes may exist in different uniqueness constraints, we de-duplicate them
+                if field_path in uc_attributes_names:
+                    continue
+
+                # Exclude relationships uniqueness constraints
+                schema_path = schema.parse_schema_path(path=field_path, schema=schema_branch)
+                if schema_path.related_schema is not None or schema_path.attribute_schema is None:
+                    continue
+
+                uc_attributes_names.add(field_path)
+                attr = getattr(node, schema_path.attribute_schema.name, None)
+                if attr is None or attr.value is None:
+                    # `attr.value` being None corresponds to optional unique attribute.
+                    # `attr` being None is not supposed to happen.
+                    value_hashed = _hash("")
+                else:
+                    value_hashed = _hash(str(attr.value))
+
+                uc_attributes_values.append(value_hashed)
+
+            if uc_attributes_values:
+                uc_lock_name = ".".join(uc_attributes_values)
+                ucs_lock_names.append(uc_lock_name)
+
+        if not ucs_lock_names:
+            continue
+
+        partial_lock_name = kind + "." + ".".join(ucs_lock_names)
+        lock_names.add(build_object_lock_name(partial_lock_name))
+
+    return sorted(lock_names)
 
 
 def build_object_lock_name(name: str) -> str:
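
Note: the _hash() helper exists because lock names must agree across worker processes; Python's builtin hash() is salted per process (PYTHONHASHSEED), while SHA-256 is stable. A self-contained illustration:

import hashlib

def stable_component(value: str) -> str:
    # Identical digest in every process, unlike builtin hash()
    return hashlib.sha256(value.encode()).hexdigest()

assert stable_component("device-01") == stable_component("device-01")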
infrahub/core/node/resource_manager/ip_address_pool.py +2 -1
@@ -15,6 +15,7 @@ from infrahub.exceptions import PoolExhaustedError, ValidationError
 from infrahub.pools.address import get_available
 
 from .. import Node
+from ..lock_utils import RESOURCE_POOL_LOCK_NAMESPACE
 
 if TYPE_CHECKING:
     from infrahub.core.branch import Branch
@@ -34,7 +35,7 @@ class CoreIPAddressPool(Node):
         prefixlen: int | None = None,
         at: Timestamp | None = None,
     ) -> Node:
-        async with lock.registry.get(name=self.get_id(), namespace="resource_pool"):
+        async with lock.registry.get(name=self.get_id(), namespace=RESOURCE_POOL_LOCK_NAMESPACE):
            # Check if there is already a resource allocated with this identifier
            # if not, pull all existing prefixes and allocated the next available
 
infrahub/core/node/resource_manager/ip_prefix_pool.py +2 -1
@@ -17,6 +17,7 @@ from infrahub.exceptions import ValidationError
 from infrahub.pools.prefix import get_next_available_prefix
 
 from .. import Node
+from ..lock_utils import RESOURCE_POOL_LOCK_NAMESPACE
 
 if TYPE_CHECKING:
     from infrahub.core.branch import Branch
@@ -37,7 +38,7 @@ class CoreIPPrefixPool(Node):
         prefix_type: str | None = None,
         at: Timestamp | None = None,
     ) -> Node:
-        async with lock.registry.get(name=self.get_id(), namespace="resource_pool"):
+        async with lock.registry.get(name=self.get_id(), namespace=RESOURCE_POOL_LOCK_NAMESPACE):
            # Check if there is already a resource allocated with this identifier
            # if not, pull all existing prefixes and allocated the next available
            if identifier:
infrahub/core/node/resource_manager/number_pool.py +2 -1
@@ -9,6 +9,7 @@ from infrahub.core.schema.attribute_parameters import NumberAttributeParameters
 from infrahub.exceptions import PoolExhaustedError
 
 from .. import Node
+from ..lock_utils import RESOURCE_POOL_LOCK_NAMESPACE
 
 if TYPE_CHECKING:
     from infrahub.core.branch import Branch
@@ -63,7 +64,7 @@ class CoreNumberPool(Node):
         identifier: str | None = None,
         at: Timestamp | None = None,
     ) -> int:
-        async with lock.registry.get(name=self.get_id(), namespace="resource_pool"):
+        async with lock.registry.get(name=self.get_id(), namespace=RESOURCE_POOL_LOCK_NAMESPACE):
            # NOTE: ideally we should use the HFID as the identifier (if available)
            # one of the challenge with using the HFID is that it might change over time
            # so we need to ensure that the identifier is stable, or we need to handle the case where the identifier changes
infrahub/core/query/attribute.py +55 -0
@@ -184,6 +184,61 @@ class AttributeUpdateNodePropertyQuery(AttributeQuery):
         self.return_labels = ["a", "np", "r"]
 
 
+class AttributeClearNodePropertyQuery(AttributeQuery):
+    name = "attribute_clear_node_property"
+    type: QueryType = QueryType.WRITE
+    insert_return: bool = False
+
+    def __init__(
+        self,
+        prop_name: str,
+        prop_id: str | None = None,
+        **kwargs: Any,
+    ):
+        self.prop_name = prop_name
+        self.prop_id = prop_id
+
+        super().__init__(**kwargs)
+
+    async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:  # noqa: ARG002
+        at = self.at or self.attr.at
+
+        branch_filter, branch_params = self.branch.get_query_filter_path(at=at)
+        self.params.update(branch_params)
+        self.params["attr_uuid"] = self.attr.id
+        self.params["branch"] = self.branch.name
+        self.params["branch_level"] = self.branch.hierarchy_level
+        self.params["at"] = at.to_string()
+        self.params["prop_name"] = self.prop_name
+        self.params["prop_id"] = self.prop_id
+
+        rel_label = f"HAS_{self.prop_name.upper()}"
+        query = """
+        MATCH (a:Attribute { uuid: $attr_uuid })-[r:%(rel_label)s]->(np:Node { uuid: $prop_id })
+        WITH DISTINCT a, np
+        CALL (a, np) {
+            MATCH (a)-[r:%(rel_label)s]->(np)
+            WHERE %(branch_filter)s
+            ORDER BY r.branch_level DESC, r.from DESC, r.status ASC
+            LIMIT 1
+            RETURN r AS property_edge
+        }
+        WITH a, np, property_edge
+        WHERE property_edge.status = "active"
+        CALL (property_edge) {
+            WITH property_edge
+            WHERE property_edge.branch = $branch
+            SET property_edge.to = $at
+        }
+        CALL (a, np, property_edge) {
+            WITH property_edge
+            WHERE property_edge.branch_level < $branch_level
+            CREATE (a)-[r:%(rel_label)s { branch: $branch, branch_level: $branch_level, status: "deleted", from: $at }]->(np)
+        }
+        """ % {"branch_filter": branch_filter, "rel_label": rel_label}
+        self.add_to_query(query)
+
+
 class AttributeGetQuery(AttributeQuery):
     name = "attribute_get"
     type: QueryType = QueryType.READ
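
Note: a hedged sketch of how AttributeClearNodePropertyQuery might be driven, following the init()/execute() pattern visible elsewhere in this diff; it assumes attr is an existing attribute whose source property (the HAS_SOURCE edge) should be cleared, and that AttributeQuery accepts attr at init time:

async def clear_source_property(db, branch, attr, source_id: str):
    query = await AttributeClearNodePropertyQuery.init(
        db=db, branch=branch, attr=attr, prop_name="source", prop_id=source_id
    )
    await query.execute(db=db)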
infrahub/core/query/ipam.py +1 -0
@@ -688,6 +688,7 @@ class IPPrefixReconcileQuery(Query):
             "ip_address_attribute_kind": ADDRESS_ATTRIBUTE_LABEL,
         }
         self.add_to_query(get_new_children_query)
+        self.order_by = ["ip_node.uuid"]
         self.return_labels = ["ip_node", "current_parent", "current_children", "new_parent", "new_children"]
 
     def _get_uuid_from_query(self, node_name: str) -> str | None:
infrahub/core/query/node.py +9 -3
@@ -246,11 +246,15 @@ class NodeCreateAllQuery(NodeQuery):
         ipnetwork_prop_list = [f"{key}: {value}" for key, value in ipnetwork_prop.items()]
 
         attrs_nonindexed_query = """
-        WITH distinct n
+        WITH DISTINCT n
         UNWIND $attrs AS attr
         // Try to find a matching vertex
-        OPTIONAL MATCH (existing_av:AttributeValue {value: attr.content.value, is_default: attr.content.is_default})
-        WHERE NOT existing_av:AttributeValueIndexed
+        CALL (attr) {
+            OPTIONAL MATCH (existing_av:AttributeValue {value: attr.content.value, is_default: attr.content.is_default})
+            WHERE NOT existing_av:AttributeValueIndexed
+            RETURN existing_av
+            LIMIT 1
+        }
         CALL (attr, existing_av) {
             // If none found, create a new one
             WITH existing_av
@@ -915,6 +919,7 @@ class NodeListGetRelationshipsQuery(Query):
        RETURN DISTINCT n_uuid, rel_name, peer_uuid, direction
         """ % {"filters": rels_filter}
         self.add_to_query(query)
+        self.order_by = ["n_uuid", "rel_name", "peer_uuid", "direction"]
         self.return_labels = ["n_uuid", "rel_name", "peer_uuid", "direction"]
 
     def get_peers_group_by_node(self) -> GroupedPeerNodes:
@@ -984,6 +989,7 @@ class NodeListGetInfoQuery(Query):
         )
         self.params.update(branch_params)
         self.params["ids"] = self.ids
+        self.order_by = ["n.uuid"]
 
         query = """
         MATCH p = (root:Root)<-[:IS_PART_OF]-(n:Node)
infrahub/core/query/relationship.py +1 -0
@@ -1036,6 +1036,7 @@ class RelationshipCountPerNodeQuery(Query):
         """ % {"branch_filter": branch_filter, "path": path}
 
         self.add_to_query(query)
+        self.order_by = ["peer_node.uuid"]
         self.return_labels = ["peer_node.uuid", "COUNT(peer_node.uuid) as nbr_peers"]
 
     async def get_count_per_peer(self) -> dict[str, int]: