infrahub-server 1.4.10__py3-none-any.whl → 1.5.0b0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (103)
  1. infrahub/actions/tasks.py +200 -16
  2. infrahub/api/artifact.py +3 -0
  3. infrahub/api/query.py +2 -0
  4. infrahub/api/schema.py +3 -0
  5. infrahub/auth.py +5 -5
  6. infrahub/cli/db.py +2 -2
  7. infrahub/config.py +7 -2
  8. infrahub/core/attribute.py +22 -19
  9. infrahub/core/branch/models.py +2 -2
  10. infrahub/core/branch/needs_rebase_status.py +11 -0
  11. infrahub/core/branch/tasks.py +2 -2
  12. infrahub/core/constants/__init__.py +1 -0
  13. infrahub/core/convert_object_type/object_conversion.py +201 -0
  14. infrahub/core/convert_object_type/repository_conversion.py +89 -0
  15. infrahub/core/convert_object_type/schema_mapping.py +27 -3
  16. infrahub/core/diff/query/artifact.py +1 -1
  17. infrahub/core/graph/__init__.py +1 -1
  18. infrahub/core/initialization.py +2 -2
  19. infrahub/core/manager.py +3 -81
  20. infrahub/core/migrations/graph/__init__.py +2 -0
  21. infrahub/core/migrations/graph/m040_profile_attrs_in_db.py +166 -0
  22. infrahub/core/node/__init__.py +23 -2
  23. infrahub/core/node/create.py +67 -35
  24. infrahub/core/node/lock_utils.py +98 -0
  25. infrahub/core/property.py +11 -0
  26. infrahub/core/protocols.py +1 -0
  27. infrahub/core/query/attribute.py +27 -15
  28. infrahub/core/query/node.py +47 -184
  29. infrahub/core/query/relationship.py +43 -26
  30. infrahub/core/query/subquery.py +0 -8
  31. infrahub/core/relationship/model.py +59 -19
  32. infrahub/core/schema/attribute_schema.py +0 -2
  33. infrahub/core/schema/definitions/core/repository.py +7 -0
  34. infrahub/core/schema/relationship_schema.py +0 -1
  35. infrahub/core/schema/schema_branch.py +3 -2
  36. infrahub/generators/models.py +31 -12
  37. infrahub/generators/tasks.py +3 -1
  38. infrahub/git/base.py +38 -1
  39. infrahub/graphql/api/dependencies.py +2 -4
  40. infrahub/graphql/api/endpoints.py +2 -2
  41. infrahub/graphql/app.py +2 -4
  42. infrahub/graphql/initialization.py +2 -3
  43. infrahub/graphql/manager.py +212 -137
  44. infrahub/graphql/middleware.py +12 -0
  45. infrahub/graphql/mutations/branch.py +11 -0
  46. infrahub/graphql/mutations/computed_attribute.py +110 -3
  47. infrahub/graphql/mutations/convert_object_type.py +34 -13
  48. infrahub/graphql/mutations/ipam.py +21 -8
  49. infrahub/graphql/mutations/main.py +37 -153
  50. infrahub/graphql/mutations/profile.py +195 -0
  51. infrahub/graphql/mutations/proposed_change.py +2 -1
  52. infrahub/graphql/mutations/repository.py +22 -83
  53. infrahub/graphql/mutations/webhook.py +1 -1
  54. infrahub/graphql/registry.py +173 -0
  55. infrahub/graphql/schema.py +4 -1
  56. infrahub/lock.py +52 -26
  57. infrahub/locks/__init__.py +0 -0
  58. infrahub/locks/tasks.py +37 -0
  59. infrahub/patch/plan_writer.py +2 -2
  60. infrahub/profiles/__init__.py +0 -0
  61. infrahub/profiles/node_applier.py +101 -0
  62. infrahub/profiles/queries/__init__.py +0 -0
  63. infrahub/profiles/queries/get_profile_data.py +99 -0
  64. infrahub/profiles/tasks.py +63 -0
  65. infrahub/repositories/__init__.py +0 -0
  66. infrahub/repositories/create_repository.py +113 -0
  67. infrahub/tasks/registry.py +6 -4
  68. infrahub/webhook/models.py +1 -1
  69. infrahub/workflows/catalogue.py +38 -3
  70. infrahub/workflows/models.py +17 -2
  71. infrahub_sdk/branch.py +5 -8
  72. infrahub_sdk/client.py +364 -84
  73. infrahub_sdk/convert_object_type.py +61 -0
  74. infrahub_sdk/ctl/check.py +2 -3
  75. infrahub_sdk/ctl/cli_commands.py +16 -12
  76. infrahub_sdk/ctl/config.py +8 -2
  77. infrahub_sdk/ctl/generator.py +2 -3
  78. infrahub_sdk/ctl/repository.py +39 -1
  79. infrahub_sdk/ctl/schema.py +12 -1
  80. infrahub_sdk/ctl/utils.py +4 -0
  81. infrahub_sdk/ctl/validate.py +5 -3
  82. infrahub_sdk/diff.py +4 -5
  83. infrahub_sdk/exceptions.py +2 -0
  84. infrahub_sdk/graphql.py +7 -2
  85. infrahub_sdk/node/attribute.py +2 -0
  86. infrahub_sdk/node/node.py +28 -20
  87. infrahub_sdk/playback.py +1 -2
  88. infrahub_sdk/protocols.py +40 -6
  89. infrahub_sdk/pytest_plugin/plugin.py +7 -4
  90. infrahub_sdk/pytest_plugin/utils.py +40 -0
  91. infrahub_sdk/repository.py +1 -2
  92. infrahub_sdk/schema/main.py +1 -0
  93. infrahub_sdk/spec/object.py +43 -4
  94. infrahub_sdk/spec/range_expansion.py +118 -0
  95. infrahub_sdk/timestamp.py +18 -6
  96. {infrahub_server-1.4.10.dist-info → infrahub_server-1.5.0b0.dist-info}/METADATA +6 -9
  97. {infrahub_server-1.4.10.dist-info → infrahub_server-1.5.0b0.dist-info}/RECORD +102 -84
  98. infrahub_testcontainers/models.py +2 -2
  99. infrahub_testcontainers/performance_test.py +4 -4
  100. infrahub/core/convert_object_type/conversion.py +0 -134
  101. {infrahub_server-1.4.10.dist-info → infrahub_server-1.5.0b0.dist-info}/LICENSE.txt +0 -0
  102. {infrahub_server-1.4.10.dist-info → infrahub_server-1.5.0b0.dist-info}/WHEEL +0 -0
  103. {infrahub_server-1.4.10.dist-info → infrahub_server-1.5.0b0.dist-info}/entry_points.txt +0 -0
infrahub/core/convert_object_type/object_conversion.py ADDED
@@ -0,0 +1,201 @@
+ from typing import Any, assert_never
+
+ from infrahub_sdk.convert_object_type import ConversionFieldInput, ConversionFieldValue
+
+ from infrahub.core.attribute import BaseAttribute
+ from infrahub.core.branch import Branch
+ from infrahub.core.branch.enums import BranchStatus
+ from infrahub.core.constants import BranchSupportType, RelationshipCardinality
+ from infrahub.core.manager import NodeManager
+ from infrahub.core.node import Node
+ from infrahub.core.node.create import create_node
+ from infrahub.core.query.relationship import GetAllPeersIds
+ from infrahub.core.query.resource_manager import PoolChangeReserved
+ from infrahub.core.relationship import RelationshipManager
+ from infrahub.core.schema import NodeSchema
+ from infrahub.core.timestamp import Timestamp
+ from infrahub.database import InfrahubDatabase
+ from infrahub.message_bus.messages import RefreshRegistryBranches
+ from infrahub.tasks.registry import update_branch_registry
+ from infrahub.workers.dependencies import get_message_bus
+
+
+ def _get_conversion_field_raw_value(conv_field_value: ConversionFieldValue) -> Any:
+     if conv_field_value.attribute_value is not None:
+         return conv_field_value.attribute_value
+     if conv_field_value.peer_id is not None:
+         return conv_field_value.peer_id
+     if conv_field_value.peers_ids is not None:
+         return conv_field_value.peers_ids
+     raise ValueError("ConversionFieldValue has not been validated correctly.")
+
+
+ async def get_out_rels_peers_ids(node: Node, db: InfrahubDatabase, at: Timestamp) -> list[str]:
+     all_peers_ids: list[str] = []
+     for name in node._relationships:
+         relm: RelationshipManager = getattr(node, name)
+         peers = await relm.get_db_peers(db=db, at=at)
+         all_peers_ids.extend([str(peer.peer_id) for peer in peers])
+     return all_peers_ids
+
+
+ async def build_data_new_node(db: InfrahubDatabase, mapping: dict[str, ConversionFieldInput], node: Node) -> dict:
+     """Value of a given field on the target kind to convert is either an input source attribute/relationship of the source node,
+     or a raw value."""
+
+     data = {}
+     for dest_field_name, conv_field_input in mapping.items():
+         if conv_field_input.source_field is not None:
+             # Fetch the value of the corresponding field from the node being converted.
+             item = getattr(node, conv_field_input.source_field)
+             if isinstance(item, BaseAttribute):
+                 data[dest_field_name] = item.value
+             elif isinstance(item, RelationshipManager):
+                 if item.schema.cardinality == RelationshipCardinality.ONE:
+                     peer = await item.get_peer(db=db)
+                     if peer is not None:
+                         data[dest_field_name] = {"id": peer.id}
+                     # else, relationship is optional, and if the target relationship is mandatory an error will be raised during creation
+                 elif item.schema.cardinality == RelationshipCardinality.MANY:
+                     data[dest_field_name] = [{"id": peer.id} for _, peer in (await item.get_peers(db=db)).items()]
+                 else:
+                     assert_never(item.schema.cardinality)
+         elif conv_field_input.data is not None:
+             data[dest_field_name] = _get_conversion_field_raw_value(conv_field_input.data)
+         elif conv_field_input.use_default_value is True:
+             pass  # default value will be used automatically when creating the node
+         else:
+             raise ValueError("ConversionFieldInput has not been validated correctly.")
+     return data
+
+
+ async def get_unidirectional_rels_peers_ids(
+     node: Node, branch: Branch, db: InfrahubDatabase, at: Timestamp
+ ) -> list[str]:
+     """
+     Returns peers ids of nodes connected to input `node` through an incoming unidirectional relationship.
+     """
+
+     out_rels_identifier = [rel.identifier for rel in node.get_schema().relationships]
+     branch_agnostic = node.get_schema().branch == BranchSupportType.AGNOSTIC
+     query = await GetAllPeersIds.init(
+         db=db,
+         node_id=node.id,
+         branch=branch,
+         exclude_identifiers=out_rels_identifier,
+         branch_agnostic=branch_agnostic,
+         at=at,
+     )
+     await query.execute(db=db)
+     return query.get_peers_uuids()
+
+
+ async def _get_other_active_branches(db: InfrahubDatabase) -> list[Branch]:
+     branches = await Branch.get_list(db=db)
+     return [branch for branch in branches if not (branch.is_global or branch.is_default)]
+
+
+ def _has_pass_thru_aware_attributes(node_schema: NodeSchema, mapping: dict[str, ConversionFieldInput]) -> bool:
+     aware_attributes = [attr for attr in node_schema.attributes if attr.branch != BranchSupportType.AGNOSTIC]
+     aware_attributes_pass_thru = [
+         attr.name for attr in aware_attributes if attr.name in mapping and mapping[attr.name].source_field is not None
+     ]
+     return len(aware_attributes_pass_thru) > 0
+
+
+ async def validate_conversion(
+     deleted_node: Node, branch: Branch, db: InfrahubDatabase, timestamp_before_conversion: Timestamp
+ ) -> None:
+     deleted_node_out_rels_peer_ids = await get_out_rels_peers_ids(
+         node=deleted_node, db=db, at=timestamp_before_conversion
+     )
+     deleted_node_unidir_rels_peer_ids = await get_unidirectional_rels_peers_ids(
+         node=deleted_node, db=db, branch=branch, at=timestamp_before_conversion
+     )
+
+     # Make sure relationships with constraints are not broken by retrieving them
+     peers_ids = deleted_node_out_rels_peer_ids + deleted_node_unidir_rels_peer_ids
+     peers = await NodeManager.get_many(ids=peers_ids, db=db, prefetch_relationships=True, branch=branch)
+     for peer in peers.values():
+         peer.validate_relationships()
+
+
+ async def convert_and_validate_object_type(
+     node: Node,
+     target_schema: NodeSchema,
+     mapping: dict[str, ConversionFieldInput],
+     branch: Branch,
+     db: InfrahubDatabase,
+ ) -> Node:
+     async with db.start_transaction() as dbt:
+         timestamp_before_conversion = Timestamp()
+         new_node = await convert_object_type(
+             node=node, target_schema=target_schema, mapping=mapping, branch=branch, db=dbt
+         )
+         await validate_conversion(
+             deleted_node=node, branch=branch, db=dbt, timestamp_before_conversion=timestamp_before_conversion
+         )
+
+     # Refresh outside the transaction otherwise other workers would pull outdated branch objects.
+     message_bus = await get_message_bus()
+     await message_bus.send(RefreshRegistryBranches())
+
+     return new_node
+
+
+ async def convert_object_type(
+     node: Node,
+     target_schema: NodeSchema,
+     mapping: dict[str, ConversionFieldInput],
+     branch: Branch,
+     db: InfrahubDatabase,
+ ) -> Node:
+     """Delete the node and return the new created one. If creation fails, the node is not deleted, and raise an error.
+     An extra check is performed on input node peers relationships to make sure they are still valid."""
+
+     node_schema = node.get_schema()
+     if not isinstance(node_schema, NodeSchema):
+         raise ValueError(f"Only a node with a NodeSchema can be converted, got {type(node_schema)}")
+
+     # Delete the node, so we delete relationships with peers as well, which might temporarily break cardinality constraints
+     # but they should be restored when creating the new node.
+     deleted_nodes = await NodeManager.delete(db=db, branch=branch, nodes=[node], cascade_delete=False)
+     if len(deleted_nodes) != 1:
+         raise ValueError(f"Deleted {len(deleted_nodes)} nodes instead of 1")
+
+     data_new_node = await build_data_new_node(db, mapping, node)
+
+     if node_schema.branch == BranchSupportType.AGNOSTIC and _has_pass_thru_aware_attributes(
+         node_schema=node_schema, mapping=mapping
+     ):
+         if not branch.is_default:
+             raise ValueError(
+                 f"Conversion of {node_schema.kind} is not allowed on branch {branch.name} because it is agnostic and has aware attributes"
+             )
+
+         # When converting an agnostic node with aware attributes, we need to put other branches in NEED_REBASE state
+         # as aware attributes do not exist in other branches after conversion
+         other_branches = await _get_other_active_branches(db=db)
+         for br in other_branches:
+             br.status = BranchStatus.NEED_REBASE
+             await br.save(db=db)
+             # Registry of other API workers are updated outside the transaction
+             await update_branch_registry(db=db, branch=br)
+
+     node_created = await create_node(
+         data=data_new_node,
+         db=db,
+         branch=branch,
+         schema=target_schema,
+     )
+
+     # If the node had some value reserved in any Pools / Resource Manager, we need to change the identifier of the reservation(s)
+     query = await PoolChangeReserved.init(
+         db=db,
+         existing_identifier=node.get_id(),
+         new_identifier=node_created.get_id(),
+         branch=branch,
+     )
+     await query.execute(db=db)
+
+     return node_created
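
For orientation, here is a minimal caller-side sketch of the conversion entry point added above. It assumes `db`, `branch`, `node`, and `target_schema` are already available in the calling task, that the field names used in the mapping exist on both kinds, and that `ConversionFieldInput` can be constructed with only `source_field` set; it is an illustration, not code from the package.

from infrahub_sdk.convert_object_type import ConversionFieldInput

from infrahub.core.convert_object_type.object_conversion import convert_and_validate_object_type


async def convert_node(db, branch, node, target_schema):
    # Map each field of the target kind either to a field of the source node
    # or leave it unmapped so its default value is used; a missing mandatory
    # field will fail when the new node is created.
    mapping = {
        "name": ConversionFieldInput(source_field="name"),  # assumed field names
        "description": ConversionFieldInput(source_field="description"),
    }
    # Deletes the old node and creates the new one inside a single transaction,
    # then re-checks that peer relationship constraints still hold.
    return await convert_and_validate_object_type(
        node=node, target_schema=target_schema, mapping=mapping, branch=branch, db=db
    )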
infrahub/core/convert_object_type/repository_conversion.py ADDED
@@ -0,0 +1,89 @@
+ from infrahub import lock
+ from infrahub.core.branch import Branch
+ from infrahub.core.constants.infrahubkind import REPOSITORYVALIDATOR, USERVALIDATOR
+ from infrahub.core.convert_object_type.object_conversion import (
+     ConversionFieldInput,
+     convert_object_type,
+     validate_conversion,
+ )
+ from infrahub.core.manager import NodeManager
+ from infrahub.core.node import Node
+ from infrahub.core.protocols import CoreReadOnlyRepository, CoreRepository
+ from infrahub.core.schema import NodeSchema
+ from infrahub.core.timestamp import Timestamp
+ from infrahub.database import InfrahubDatabase
+ from infrahub.message_bus.messages import RefreshRegistryBranches
+ from infrahub.repositories.create_repository import RepositoryFinalizer
+ from infrahub.workers.dependencies import get_message_bus
+
+
+ async def convert_repository_type(
+     repository: CoreRepository | CoreReadOnlyRepository,
+     target_schema: NodeSchema,
+     mapping: dict[str, ConversionFieldInput],
+     branch: Branch,
+     db: InfrahubDatabase,
+     repository_post_creator: RepositoryFinalizer,
+ ) -> Node:
+     """Delete the node and return the new created one. If creation fails, the node is not deleted, and raise an error.
+     An extra check is performed on input node peers relationships to make sure they are still valid."""
+
+     repo_name = repository.name.value
+     async with lock.registry.get(name=repo_name, namespace="repository"):
+         async with db.start_transaction() as dbt:
+             timestamp_before_conversion = Timestamp()
+
+             # Fetch validators before deleting the repository otherwise validator-repository would no longer exist
+             user_validators = await NodeManager.query(
+                 db=dbt, schema=USERVALIDATOR, prefetch_relationships=True, filters={"repository__id": repository.id}
+             )
+             repository_validators = await NodeManager.query(
+                 db=dbt,
+                 schema=REPOSITORYVALIDATOR,
+                 prefetch_relationships=True,
+                 filters={"repository__id": repository.id},
+             )
+             new_repository = await convert_object_type(
+                 node=repository,  # type: ignore[arg-type]
+                 target_schema=target_schema,
+                 mapping=mapping,
+                 branch=branch,
+                 db=dbt,
+             )
+
+             for user_validator in user_validators:
+                 await user_validator.repository.update(db=dbt, data=new_repository)
+                 await user_validator.repository.save(db=dbt)
+
+             for repository_validator in repository_validators:
+                 await repository_validator.repository.update(db=dbt, data=new_repository)
+                 await repository_validator.repository.save(db=dbt)
+
+             await validate_conversion(
+                 deleted_node=repository,  # type: ignore[arg-type]
+                 branch=branch,
+                 db=dbt,
+                 timestamp_before_conversion=timestamp_before_conversion,
+             )
+
+         # Refresh outside the transaction otherwise other workers would pull outdated branch objects.
+         message_bus = await get_message_bus()
+         await message_bus.send(RefreshRegistryBranches())
+
+         # Following call involve a potential update of `commit` value of the newly created repository
+         # that would be done from another database connection so it can't be performed within above transaction.
+         # Also note since the conversion can only be performed on main branch here, it is fine that we do it
+         # after having updating other branches status to NEEDS_REBASE.
+         await repository_post_creator.post_create(
+             branch=branch,
+             obj=new_repository,  # type: ignore
+             db=db,
+             delete_on_connectivity_failure=False,
+         )
+
+         # Delete the RepositoryGroup associated with the old repository, as a new one was created for the new repository.
+         repository_groups = (await repository.groups_objects.get_peers(db=db)).values()
+         for repository_group in repository_groups:
+             await NodeManager.delete(db=db, branch=branch, nodes=[repository_group], cascade_delete=False)
+
+         return new_repository
infrahub/core/convert_object_type/schema_mapping.py CHANGED
@@ -1,6 +1,6 @@
  from pydantic import BaseModel

- from infrahub.core.constants import RelationshipCardinality
+ from infrahub.core.constants import BranchSupportType, RelationshipCardinality
  from infrahub.core.schema import NodeSchema


@@ -13,6 +13,21 @@ class SchemaMappingValue(BaseModel):
  SchemaMapping = dict[str, SchemaMappingValue]


+ def _are_branch_support_matching(
+     left_branch_support: BranchSupportType | None,
+     right_branch_support: BranchSupportType | None,
+ ) -> bool:
+     if left_branch_support == right_branch_support:
+         return True
+
+     local_aware = [BranchSupportType.AWARE, BranchSupportType.LOCAL]
+
+     if left_branch_support in local_aware and right_branch_support in local_aware:
+         return True
+
+     return False
+
+
  def get_schema_mapping(source_schema: NodeSchema, target_schema: NodeSchema) -> SchemaMapping:
      """
      Return fields mapping meant to be used for converting a node from `source_kind` to `target_kind`.
@@ -31,7 +46,11 @@ def get_schema_mapping(source_schema: NodeSchema, target_schema: NodeSchema) ->
      # Process attributes
      for target_attr in target_schema.attributes:
          source_attr = source_attrs.get(target_attr.name)
-         if source_attr and source_attr.kind == target_attr.kind:
+         if (
+             source_attr
+             and source_attr.kind == target_attr.kind
+             and _are_branch_support_matching(source_attr.branch, target_attr.branch)
+         ):
              target_field_to_source_field[target_attr.name] = SchemaMappingValue(
                  source_field_name=source_attr.name, is_mandatory=not target_attr.optional
              )
@@ -41,7 +60,12 @@ def get_schema_mapping(source_schema: NodeSchema, target_schema: NodeSchema) ->
      # Process relationships
      for target_rel in target_schema.relationships:
          source_rel = source_rels.get(target_rel.name)
-         if source_rel and source_rel.peer == target_rel.peer and source_rel.cardinality == target_rel.cardinality:
+         if (
+             source_rel
+             and source_rel.peer == target_rel.peer
+             and source_rel.cardinality == target_rel.cardinality
+             and _are_branch_support_matching(source_rel.branch, target_rel.branch)
+         ):
              target_field_to_source_field[target_rel.name] = SchemaMappingValue(
                  source_field_name=source_rel.name,
                  is_mandatory=not target_rel.optional,
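
As a quick illustration of the branch-support rule introduced above: identical support levels match, AWARE and LOCAL are treated as interchangeable, and any other pairing (notably AGNOSTIC versus AWARE) is excluded from the mapping. A small sketch exercising the private helper directly, which ordinary callers would not do:

from infrahub.core.constants import BranchSupportType

from infrahub.core.convert_object_type.schema_mapping import _are_branch_support_matching

# Identical support levels always match.
assert _are_branch_support_matching(BranchSupportType.AGNOSTIC, BranchSupportType.AGNOSTIC)
# AWARE and LOCAL are considered equivalent for field mapping.
assert _are_branch_support_matching(BranchSupportType.AWARE, BranchSupportType.LOCAL)
# An agnostic field is never mapped onto an aware one.
assert not _are_branch_support_matching(BranchSupportType.AGNOSTIC, BranchSupportType.AWARE)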
infrahub/core/diff/query/artifact.py CHANGED
@@ -148,7 +148,7 @@ CALL (target_node, definition_node){
  )
  RETURN
  target_artifact,
- (trel1.status = "active" AND trel2.status = "active" AND drel1.status = "active" AND drel1.status = "active") AS artifact_is_active
+ (trel1.status = "active" AND trel2.status = "active" AND drel1.status = "active" AND drel2.status = "active") AS artifact_is_active
  ORDER BY trel1.from DESC, trel2.from DESC, drel1.from DESC, drel2.from DESC,
  trel1.status ASC, trel2.status ASC, drel1.status ASC, drel2.status ASC
  LIMIT 1
infrahub/core/graph/__init__.py CHANGED
@@ -1 +1 @@
- GRAPH_VERSION = 39
+ GRAPH_VERSION = 40
infrahub/core/initialization.py CHANGED
@@ -34,7 +34,7 @@ from infrahub.database import InfrahubDatabase
  from infrahub.database.memgraph import IndexManagerMemgraph
  from infrahub.database.neo4j import IndexManagerNeo4j
  from infrahub.exceptions import DatabaseError
- from infrahub.graphql.manager import GraphQLSchemaManager
+ from infrahub.graphql.manager import registry as graphql_registry
  from infrahub.log import get_logger
  from infrahub.menu.utils import create_default_menu
  from infrahub.permissions import PermissionBackend, get_or_create_global_permission
@@ -196,7 +196,7 @@ async def initialization(db: InfrahubDatabase, add_database_indexes: bool = Fals

      default_branch = registry.get_branch_from_registry(branch=registry.default_branch)
      schema_branch = registry.schema.get_schema_branch(name=default_branch.name)
-     gqlm = GraphQLSchemaManager.get_manager_for_branch(branch=default_branch, schema_branch=schema_branch)
+     gqlm = graphql_registry.get_manager_for_branch(branch=default_branch, schema_branch=schema_branch)
      gqlm.get_graphql_schema(
          include_query=True,
          include_mutation=True,
infrahub/core/manager.py CHANGED
@@ -1,7 +1,6 @@
  from __future__ import annotations

  from copy import copy
- from functools import reduce
  from typing import TYPE_CHECKING, Any, Iterable, Literal, TypeVar, overload

  from infrahub_sdk.utils import deep_merge_dict, is_valid_uuid
@@ -11,9 +10,7 @@ from infrahub.core.node import Node
  from infrahub.core.node.delete_validator import NodeDeleteValidator
  from infrahub.core.query.node import (
      AttributeFromDB,
-     AttributeNodePropertyFromDB,
      GroupedPeerNodes,
-     NodeAttributesFromDB,
      NodeGetHierarchyQuery,
      NodeGetListQuery,
      NodeListGetAttributeQuery,
@@ -75,60 +72,6 @@ def get_schema(
      return node_schema


- class ProfileAttributeIndex:
-     def __init__(
-         self,
-         profile_attributes_id_map: dict[str, NodeAttributesFromDB],
-         profile_ids_by_node_id: dict[str, list[str]],
-     ) -> None:
-         self._profile_attributes_id_map = profile_attributes_id_map
-         self._profile_ids_by_node_id = profile_ids_by_node_id
-
-     def apply_profiles(self, node_data_dict: dict[str, Any]) -> dict[str, Any]:
-         updated_data: dict[str, Any] = {**node_data_dict}
-         node_id = node_data_dict.get("id")
-         profile_ids = self._profile_ids_by_node_id.get(node_id, [])
-         if not profile_ids:
-             return updated_data
-         profiles = [
-             self._profile_attributes_id_map[p_id] for p_id in profile_ids if p_id in self._profile_attributes_id_map
-         ]
-
-         def get_profile_priority(nafd: NodeAttributesFromDB) -> tuple[int | float, str]:
-             try:
-                 return (int(nafd.attrs.get("profile_priority").value), nafd.node.get("uuid"))
-             except (TypeError, AttributeError):
-                 return (float("inf"), "")
-
-         profiles.sort(key=get_profile_priority)
-
-         for attr_name, attr_data in updated_data.items():
-             if not isinstance(attr_data, AttributeFromDB):
-                 continue
-             if not attr_data.is_default:
-                 continue
-             profile_value, profile_uuid = None, None
-             index = 0
-
-             while profile_value is None and index <= (len(profiles) - 1):
-                 try:
-                     profile_value = profiles[index].attrs[attr_name].value
-                     if profile_value != "NULL":
-                         profile_uuid = profiles[index].node["uuid"]
-                         break
-                     profile_value = None
-                 except (IndexError, KeyError, AttributeError):
-                     ...
-                 index += 1
-
-             if profile_value is not None:
-                 attr_data.value = profile_value
-                 attr_data.is_from_profile = True
-                 attr_data.is_default = False
-                 attr_data.node_properties["source"] = AttributeNodePropertyFromDB(uuid=profile_uuid, labels=[])
-         return updated_data
-
-
  class NodeManager:
      @overload
      @classmethod
@@ -1129,21 +1072,11 @@ class NodeManager:
          )
          await query.execute(db=db)
          nodes_info_by_id: dict[str, NodeToProcess] = {node.node_uuid: node async for node in query.get_nodes(db=db)}
-         profile_ids_by_node_id = query.get_profile_ids_by_node_id()
-         all_profile_ids = reduce(
-             lambda all_ids, these_ids: all_ids | set(these_ids), profile_ids_by_node_id.values(), set()
-         )
-
-         if fields and all_profile_ids:
-             if "profile_priority" not in fields:
-                 fields["profile_priority"] = {}
-             if "value" not in fields["profile_priority"]:
-                 fields["profile_priority"]["value"] = None

          # Query list of all Attributes
          query = await NodeListGetAttributeQuery.init(
              db=db,
-             ids=list(nodes_info_by_id.keys()) + list(all_profile_ids),
+             ids=list(nodes_info_by_id.keys()),
              fields=fields,
              branch=branch,
              include_source=include_source,
@@ -1153,17 +1086,7 @@ class NodeManager:
              branch_agnostic=branch_agnostic,
          )
          await query.execute(db=db)
-         all_node_attributes = query.get_attributes_group_by_node()
-         profile_attributes: dict[str, dict[str, AttributeFromDB]] = {}
-         node_attributes: dict[str, dict[str, AttributeFromDB]] = {}
-         for node_id, attribute_dict in all_node_attributes.items():
-             if node_id in all_profile_ids:
-                 profile_attributes[node_id] = attribute_dict
-             else:
-                 node_attributes[node_id] = attribute_dict
-         profile_index = ProfileAttributeIndex(
-             profile_attributes_id_map=profile_attributes, profile_ids_by_node_id=profile_ids_by_node_id
-         )
+         node_attributes = query.get_attributes_group_by_node()

          nodes: dict[str, Node] = {}

@@ -1192,11 +1115,10 @@ class NodeManager:
              for attr_name, attr in node_attributes[node_id].attrs.items():
                  new_node_data[attr_name] = attr

-             new_node_data_with_profile_overrides = profile_index.apply_profiles(new_node_data)
              node_class = identify_node_class(node=node)
              node_branch = await registry.get_branch(db=db, branch=node.branch)
              item = await node_class.init(schema=node.schema, branch=node_branch, at=at, db=db)
-             await item.load(**new_node_data_with_profile_overrides, db=db)
+             await item.load(**new_node_data, db=db)

              nodes[node_id] = item

infrahub/core/migrations/graph/__init__.py CHANGED
@@ -41,6 +41,7 @@ from .m036_drop_attr_value_index import Migration036
  from .m037_index_attr_vals import Migration037
  from .m038_redo_0000_prefix_fix import Migration038
  from .m039_ipam_reconcile import Migration039
+ from .m040_profile_attrs_in_db import Migration040

  if TYPE_CHECKING:
      from infrahub.core.root import Root
@@ -87,6 +88,7 @@ MIGRATIONS: list[type[GraphMigration | InternalSchemaMigration | ArbitraryMigrat
      Migration037,
      Migration038,
      Migration039,
+     Migration040,
  ]
