infrahub-server 1.1.7__py3-none-any.whl → 1.1.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. infrahub/config.py +6 -0
  2. infrahub/core/diff/enricher/cardinality_one.py +5 -0
  3. infrahub/core/diff/enricher/hierarchy.py +17 -4
  4. infrahub/core/diff/enricher/labels.py +5 -0
  5. infrahub/core/diff/enricher/path_identifier.py +5 -0
  6. infrahub/core/diff/model/path.py +24 -1
  7. infrahub/core/diff/parent_node_adder.py +78 -0
  8. infrahub/core/diff/payload_builder.py +13 -2
  9. infrahub/core/diff/query/merge.py +20 -17
  10. infrahub/core/diff/query/save.py +188 -182
  11. infrahub/core/diff/query/summary_counts_enricher.py +51 -4
  12. infrahub/core/diff/repository/deserializer.py +8 -3
  13. infrahub/core/diff/repository/repository.py +156 -38
  14. infrahub/core/diff/tasks.py +4 -4
  15. infrahub/core/graph/__init__.py +1 -1
  16. infrahub/core/graph/index.py +3 -0
  17. infrahub/core/migrations/graph/__init__.py +6 -0
  18. infrahub/core/migrations/graph/m019_restore_rels_to_time.py +256 -0
  19. infrahub/core/migrations/graph/m020_duplicate_edges.py +160 -0
  20. infrahub/core/migrations/graph/m021_missing_hierarchy_merge.py +51 -0
  21. infrahub/core/migrations/query/node_duplicate.py +38 -18
  22. infrahub/core/migrations/schema/node_remove.py +26 -12
  23. infrahub/core/migrations/shared.py +10 -8
  24. infrahub/core/node/__init__.py +13 -8
  25. infrahub/core/node/constraints/grouped_uniqueness.py +16 -3
  26. infrahub/core/query/attribute.py +2 -0
  27. infrahub/core/query/node.py +69 -19
  28. infrahub/core/query/relationship.py +105 -16
  29. infrahub/core/query/resource_manager.py +2 -0
  30. infrahub/core/relationship/model.py +8 -12
  31. infrahub/core/schema/definitions/core.py +1 -0
  32. infrahub/database/__init__.py +10 -1
  33. infrahub/database/metrics.py +7 -1
  34. infrahub/dependencies/builder/diff/deserializer.py +3 -1
  35. infrahub/dependencies/builder/diff/enricher/hierarchy.py +3 -1
  36. infrahub/dependencies/builder/diff/parent_node_adder.py +8 -0
  37. infrahub/graphql/initialization.py +3 -0
  38. infrahub/graphql/loaders/node.py +2 -12
  39. infrahub/graphql/loaders/peers.py +77 -0
  40. infrahub/graphql/loaders/shared.py +13 -0
  41. infrahub/graphql/mutations/diff.py +17 -10
  42. infrahub/graphql/mutations/resource_manager.py +3 -3
  43. infrahub/graphql/resolvers/many_relationship.py +264 -0
  44. infrahub/graphql/resolvers/resolver.py +3 -103
  45. infrahub/graphql/subscription/graphql_query.py +2 -0
  46. infrahub_sdk/batch.py +2 -2
  47. infrahub_sdk/client.py +10 -2
  48. infrahub_sdk/config.py +4 -1
  49. infrahub_sdk/ctl/check.py +4 -4
  50. infrahub_sdk/ctl/cli_commands.py +16 -11
  51. infrahub_sdk/ctl/exceptions.py +0 -6
  52. infrahub_sdk/ctl/exporter.py +1 -1
  53. infrahub_sdk/ctl/generator.py +5 -5
  54. infrahub_sdk/ctl/importer.py +3 -2
  55. infrahub_sdk/ctl/menu.py +1 -1
  56. infrahub_sdk/ctl/object.py +1 -1
  57. infrahub_sdk/ctl/repository.py +23 -15
  58. infrahub_sdk/ctl/schema.py +2 -2
  59. infrahub_sdk/ctl/utils.py +6 -5
  60. infrahub_sdk/ctl/validate.py +2 -1
  61. infrahub_sdk/data.py +1 -1
  62. infrahub_sdk/exceptions.py +12 -0
  63. infrahub_sdk/generator.py +3 -0
  64. infrahub_sdk/node.py +8 -8
  65. infrahub_sdk/protocols.py +0 -1
  66. infrahub_sdk/schema/__init__.py +0 -3
  67. infrahub_sdk/testing/docker.py +30 -0
  68. infrahub_sdk/testing/schemas/animal.py +9 -0
  69. infrahub_sdk/transfer/exporter/json.py +1 -1
  70. infrahub_sdk/utils.py +11 -1
  71. infrahub_sdk/yaml.py +2 -3
  72. {infrahub_server-1.1.7.dist-info → infrahub_server-1.1.9.dist-info}/METADATA +1 -1
  73. {infrahub_server-1.1.7.dist-info → infrahub_server-1.1.9.dist-info}/RECORD +78 -71
  74. infrahub_testcontainers/container.py +11 -0
  75. infrahub_testcontainers/docker-compose.test.yml +3 -6
  76. infrahub_sdk/ctl/_file.py +0 -13
  77. {infrahub_server-1.1.7.dist-info → infrahub_server-1.1.9.dist-info}/LICENSE.txt +0 -0
  78. {infrahub_server-1.1.7.dist-info → infrahub_server-1.1.9.dist-info}/WHEEL +0 -0
  79. {infrahub_server-1.1.7.dist-info → infrahub_server-1.1.9.dist-info}/entry_points.txt +0 -0
infrahub/graphql/mutations/diff.py CHANGED
@@ -1,6 +1,6 @@
 from typing import TYPE_CHECKING
 
-from graphene import Boolean, DateTime, InputObjectType, Mutation, String
+from graphene import Boolean, DateTime, Field, InputObjectType, Mutation, String
 
 from graphql import GraphQLResolveInfo
 
 from infrahub.core import registry
@@ -9,11 +9,12 @@ from infrahub.core.diff.model.path import NameTrackingId
 from infrahub.core.diff.models import RequestDiffUpdate
 from infrahub.core.diff.repository.repository import DiffRepository
 from infrahub.core.timestamp import Timestamp
-from infrahub.database import retry_db_transaction
 from infrahub.dependencies.registry import get_component_registry
 from infrahub.exceptions import ValidationError
 from infrahub.workflows.catalogue import DIFF_UPDATE
 
+from ..types.task import TaskInfo
+
 if TYPE_CHECKING:
     from ..initialization import GraphqlContext
 
@@ -23,25 +24,30 @@ class DiffUpdateInput(InputObjectType):
     name = String(required=False)
     from_time = DateTime(required=False)
     to_time = DateTime(required=False)
-    wait_for_completion = Boolean(required=False)
+    wait_for_completion = Boolean(required=False, deprecation_reason="Please use `wait_until_completion` instead")
 
 
 class DiffUpdateMutation(Mutation):
     class Arguments:
         data = DiffUpdateInput(required=True)
+        wait_until_completion = Boolean(required=False)
 
     ok = Boolean()
+    task = Field(TaskInfo, required=False)
 
     @classmethod
-    @retry_db_transaction(name="diff_update")
     async def mutate(
         cls,
         root: dict,  # pylint: disable=unused-argument
         info: GraphQLResolveInfo,
         data: DiffUpdateInput,
-    ) -> dict[str, bool]:
+        wait_until_completion: bool = False,
+    ) -> dict[str, bool | dict[str, str]]:
         context: GraphqlContext = info.context
 
+        if data.wait_for_completion is True:
+            wait_until_completion = True
+
         from_timestamp_str = DateTime.serialize(data.from_time) if data.from_time else None
         to_timestamp_str = DateTime.serialize(data.to_time) if data.to_time else None
         if (data.from_time or data.to_time) and not data.name:
@@ -53,11 +59,11 @@ class DiffUpdateMutation(Mutation):
         diff_repository = await component_registry.get_component(DiffRepository, db=context.db, branch=diff_branch)
 
         tracking_id = NameTrackingId(name=data.name)
-        existing_diffs_metatdatas = await diff_repository.get_roots_metadata(
+        existing_diffs_metadatas = await diff_repository.get_roots_metadata(
            diff_branch_names=[diff_branch.name], base_branch_names=[base_branch.name], tracking_id=tracking_id
        )
-        if existing_diffs_metatdatas:
-            metadata = existing_diffs_metatdatas[0]
+        if existing_diffs_metadatas:
+            metadata = existing_diffs_metadatas[0]
             from_time = Timestamp(from_timestamp_str) if from_timestamp_str else None
             to_time = Timestamp(to_timestamp_str) if to_timestamp_str else None
             branched_from_timestamp = Timestamp(diff_branch.get_branched_from())
@@ -68,7 +74,7 @@ class DiffUpdateMutation(Mutation):
             if to_time and to_time < metadata.to_time:
                 raise ValidationError(f"to_time must be null or greater than or equal to {metadata.to_time}")
 
-        if data.wait_for_completion is True:
+        if wait_until_completion is True:
             diff_coordinator = await component_registry.get_component(
                 DiffCoordinator, db=context.db, branch=diff_branch
             )
@@ -89,6 +95,7 @@ class DiffUpdateMutation(Mutation):
             to_time=to_timestamp_str,
         )
         if context.service:
-            await context.service.workflow.submit_workflow(workflow=DIFF_UPDATE, parameters={"model": model})
+            workflow = await context.service.workflow.submit_workflow(workflow=DIFF_UPDATE, parameters={"model": model})
+            return {"ok": True, "task": {"id": str(workflow.id)}}
 
         return {"ok": True}
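Taken together, the hunks above deprecate `wait_for_completion` on the input type in favor of a top-level `wait_until_completion` argument, drop the `retry_db_transaction` decorator, fix the `metatdatas` typo, and, when the mutation does not wait, return the id of the submitted DIFF_UPDATE workflow in a new `task` field. A minimal sketch of how a caller might exercise the new shape through the Python SDK; the mutation name (DiffUpdate), the `branch` input field, and the server address are assumptions not shown in the hunks above:

from infrahub_sdk import InfrahubClient

DIFF_UPDATE_MUTATION = """
mutation {
  DiffUpdate(data: { name: "my-diff", branch: "my-branch" }, wait_until_completion: false) {
    ok
    task { id }
  }
}
"""

async def trigger_diff_update() -> None:
    client = InfrahubClient(address="http://localhost:8000")  # placeholder address
    result = await client.execute_graphql(query=DIFF_UPDATE_MUTATION)
    # With wait_until_completion=false the mutation now submits the DIFF_UPDATE
    # workflow and returns its task id instead of blocking until the diff is done.
    print(result["DiffUpdate"]["task"]["id"])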
infrahub/graphql/mutations/resource_manager.py CHANGED
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 from typing import TYPE_CHECKING, Any
 
-from graphene import Boolean, Field, InputField, InputObjectType, Int, Mutation, String
+from graphene import Boolean, Field, InputField, InputObjectType, Int, List, Mutation, String
 from graphene.types.generic import GenericScalar
 from typing_extensions import Self
 
@@ -30,7 +30,7 @@ if TYPE_CHECKING:
 
 class IPPrefixPoolGetResourceInput(InputObjectType):
     id = InputField(String(required=False), description="ID of the pool to allocate from")
-    hfid = InputField(String(required=False), description="HFID of the pool to allocate from")
+    hfid = InputField(List(of_type=String, required=False), description="HFID of the pool to allocate from")
     identifier = InputField(String(required=False), description="Identifier for the allocated resource")
     prefix_length = InputField(Int(required=False), description="Size of the prefix to allocate")
     member_type = InputField(String(required=False), description="Type of members for the newly created prefix")
@@ -40,7 +40,7 @@ class IPPrefixPoolGetResourceInput(InputObjectType):
 
 class IPAddressPoolGetResourceInput(InputObjectType):
     id = InputField(String(required=False), description="ID of the pool to allocate from")
-    hfid = InputField(String(required=False), description="HFID of the pool to allocate from")
+    hfid = InputField(List(of_type=String, required=False), description="HFID of the pool to allocate from")
     identifier = InputField(String(required=False), description="Identifier for the allocated resource")
     prefix_length = InputField(
         Int(required=False), description="Size of the prefix mask to allocate on the new IP address"
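Both pool inputs now accept `hfid` as a list of strings rather than a single string, matching the fact that a human-friendly ID is composed of one or more components. A hedged sketch of the adjusted call; the mutation name and the pool HFID value are assumptions for illustration:

GET_PREFIX = """
mutation {
  IPPrefixPoolGetResource(data: { hfid: ["core-pool"], prefix_length: 24 }) {
    ok
  }
}
"""
# Before this release the same call would have been written hfid: "core-pool";
# a list is now expected even when the HFID has a single component.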
infrahub/graphql/resolvers/many_relationship.py ADDED
@@ -0,0 +1,264 @@
+from typing import TYPE_CHECKING, Any
+
+from graphql import GraphQLResolveInfo
+from infrahub_sdk.utils import deep_merge_dict, extract_fields
+
+from infrahub.core.branch.models import Branch
+from infrahub.core.constants import BranchSupportType, RelationshipHierarchyDirection
+from infrahub.core.manager import NodeManager
+from infrahub.core.query.node import NodeGetHierarchyQuery
+from infrahub.core.schema.node_schema import NodeSchema
+from infrahub.core.schema.relationship_schema import RelationshipSchema
+from infrahub.core.timestamp import Timestamp
+from infrahub.database import InfrahubDatabase
+
+from ..loaders.peers import PeerRelationshipsDataLoader, QueryPeerParams
+from ..types import RELATIONS_PROPERTY_MAP, RELATIONS_PROPERTY_MAP_REVERSED
+
+if TYPE_CHECKING:
+    from infrahub.core.schema import MainSchemaTypes
+
+    from ..initialization import GraphqlContext
+
+
+class ManyRelationshipResolver:
+    def __init__(self) -> None:
+        self._data_loader_instances: dict[QueryPeerParams, PeerRelationshipsDataLoader] = {}
+
+    async def get_descendant_ids(
+        self,
+        db: InfrahubDatabase,
+        branch: Branch,
+        at: Timestamp | None,
+        parent_id: str,
+        node_schema: NodeSchema,
+    ) -> list[str]:
+        async with db.start_session() as dbs:
+            query = await NodeGetHierarchyQuery.init(
+                db=dbs,
+                direction=RelationshipHierarchyDirection.DESCENDANTS,
+                node_id=parent_id,
+                node_schema=node_schema,
+                at=at,
+                branch=branch,
+            )
+            await query.execute(db=dbs)
+            return list(query.get_peer_ids())
+
+    async def get_peer_count(
+        self,
+        db: InfrahubDatabase,
+        branch: Branch,
+        at: Timestamp | None,
+        ids: list[str],
+        source_kind: str,
+        rel_schema: RelationshipSchema,
+        filters: dict[str, Any],
+    ) -> int:
+        async with db.start_session() as dbs:
+            return await NodeManager.count_peers(
+                db=dbs,
+                ids=ids,
+                source_kind=source_kind,
+                schema=rel_schema,
+                filters=filters,
+                at=at,
+                branch=branch,
+                branch_agnostic=rel_schema.branch is BranchSupportType.AGNOSTIC,
+            )
+
+    async def resolve(
+        self,
+        parent: dict,
+        info: GraphQLResolveInfo,
+        include_descendants: bool = False,
+        offset: int | None = None,
+        limit: int | None = None,
+        **kwargs: Any,
+    ) -> dict[str, Any]:
+        """Resolver for relationships of cardinality=many for Edged responses
+
+        This resolver is used for paginated responses and as such we redefine the requested
+        fields by only reusing information below the 'node' key.
+        """
+        # Extract the InfraHub schema by inspecting the GQL Schema
+
+        node_schema: MainSchemaTypes = info.parent_type.graphene_type._meta.schema  # type: ignore[attr-defined]
+
+        context: GraphqlContext = info.context
+
+        # Extract the name of the fields in the GQL query
+        fields = await extract_fields(info.field_nodes[0].selection_set)
+        edges = fields.get("edges", {})
+        node_fields = edges.get("node", {})
+        property_fields = edges.get("properties", {})
+        for key, value in property_fields.items():
+            mapped_name = RELATIONS_PROPERTY_MAP[key]
+            node_fields[mapped_name] = value
+
+        filters = {
+            f"{info.field_name}__{key}": value
+            for key, value in kwargs.items()
+            if "__" in key and value or key in ["id", "ids"]
+        }
+
+        response: dict[str, Any] = {"edges": [], "count": None}
+
+        # Extract the schema of the node on the other end of the relationship from the GQL Schema
+        node_rel = node_schema.get_relationship(info.field_name)
+        source_kind = node_schema.kind
+        ids = [parent["id"]]
+        if isinstance(node_schema, NodeSchema):
+            if node_schema.hierarchy:
+                source_kind = node_schema.hierarchy
+
+            if include_descendants:
+                descendant_ids = await self.get_descendant_ids(
+                    db=context.db,
+                    branch=context.branch,
+                    at=context.at,
+                    parent_id=ids[0],
+                    node_schema=node_schema,
+                )
+                ids.extend(descendant_ids)
+
+        if "count" in fields:
+            peer_count = await self.get_peer_count(
+                db=context.db,
+                branch=context.branch,
+                at=context.at,
+                ids=ids,
+                source_kind=source_kind,
+                rel_schema=node_rel,
+                filters=filters,
+            )
+            response["count"] = peer_count
+
+        if not node_fields:
+            return response
+
+        if offset or limit:
+            node_graph = await self._get_entities_simple(
+                db=context.db,
+                branch=context.branch,
+                ids=ids,
+                at=context.at,
+                related_node_ids=context.related_node_ids,
+                source_kind=source_kind,
+                rel_schema=node_rel,
+                filters=filters,
+                node_fields=node_fields,
+                offset=offset,
+                limit=limit,
+            )
+        else:
+            node_graph = await self._get_entities_with_data_loader(
+                db=context.db,
+                branch=context.branch,
+                ids=ids,
+                at=context.at,
+                related_node_ids=context.related_node_ids,
+                source_kind=source_kind,
+                rel_schema=node_rel,
+                filters=filters,
+                node_fields=node_fields,
+            )
+
+        if not node_graph:
+            return response
+
+        entries = []
+        for node in node_graph:
+            entry: dict[str, dict[str, Any]] = {"node": {}, "properties": {}}
+            for key, mapped in RELATIONS_PROPERTY_MAP_REVERSED.items():
+                value = node.pop(key, None)
+                if value:
+                    entry["properties"][mapped] = value
+            entry["node"] = node
+            entries.append(entry)
+
+        response["edges"] = entries
+        return response
+
+    async def _get_entities_simple(
+        self,
+        db: InfrahubDatabase,
+        branch: Branch,
+        ids: list[str],
+        at: Timestamp | None,
+        related_node_ids: set[str] | None,
+        source_kind: str,
+        rel_schema: RelationshipSchema,
+        filters: dict[str, Any],
+        node_fields: dict[str, Any],
+        offset: int | None = None,
+        limit: int | None = None,
+    ) -> list[dict[str, Any]] | None:
+        async with db.start_session() as dbs:
+            objs = await NodeManager.query_peers(
+                db=dbs,
+                ids=ids,
+                source_kind=source_kind,
+                schema=rel_schema,
+                filters=filters,
+                fields=node_fields,
+                offset=offset,
+                limit=limit,
+                at=at,
+                branch=branch,
+                branch_agnostic=rel_schema.branch is BranchSupportType.AGNOSTIC,
+                fetch_peers=True,
+            )
+            if not objs:
+                return None
+            return [await obj.to_graphql(db=dbs, fields=node_fields, related_node_ids=related_node_ids) for obj in objs]
+
+    async def _get_entities_with_data_loader(
+        self,
+        db: InfrahubDatabase,
+        branch: Branch,
+        ids: list[str],
+        at: Timestamp | None,
+        related_node_ids: set[str] | None,
+        source_kind: str,
+        rel_schema: RelationshipSchema,
+        filters: dict[str, Any],
+        node_fields: dict[str, Any],
+    ) -> list[dict[str, Any]] | None:
+        if node_fields and "display_label" in node_fields:
+            schema_branch = db.schema.get_schema_branch(name=branch.name)
+            display_label_fields = schema_branch.generate_fields_for_display_label(name=rel_schema.peer)
+            if display_label_fields:
+                node_fields = deep_merge_dict(dicta=node_fields, dictb=display_label_fields)
+
+        if node_fields and "hfid" in node_fields:
+            peer_schema = db.schema.get(name=rel_schema.peer, branch=branch, duplicate=False)
+            hfid_fields = peer_schema.generate_fields_for_hfid()
+            if hfid_fields:
+                node_fields = deep_merge_dict(dicta=node_fields, dictb=hfid_fields)
+
+        query_params = QueryPeerParams(
+            branch=branch,
+            source_kind=source_kind,
+            schema=rel_schema,
+            filters=filters,
+            fields=node_fields,
+            at=at,
+            branch_agnostic=rel_schema.branch is BranchSupportType.AGNOSTIC,
+        )
+        if query_params in self._data_loader_instances:
+            loader = self._data_loader_instances[query_params]
+        else:
+            loader = PeerRelationshipsDataLoader(db=db, query_params=query_params)
+            self._data_loader_instances[query_params] = loader
+        all_peer_rels = []
+        for node_id in ids:
+            node_peer_rels = await loader.load(key=node_id)
+            all_peer_rels.extend(node_peer_rels)
+        if not all_peer_rels:
+            return None
+        async with db.start_session() as dbs:
+            return [
+                await obj.to_graphql(db=dbs, fields=node_fields, related_node_ids=related_node_ids)
+                for obj in all_peer_rels
+            ]
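The new resolver caches one PeerRelationshipsDataLoader per QueryPeerParams, so every parent node that requests the same relationship with the same filters, fields, and timestamp is coalesced into a single batched peer query instead of one query per parent. A minimal, generic sketch of that batching pattern, assuming the aiodataloader package; the real loader added in infrahub/graphql/loaders/peers.py runs a batched database query in its place:

import asyncio

from aiodataloader import DataLoader


class PeerLoaderSketch(DataLoader):
    async def batch_load_fn(self, keys: list[str]) -> list[list[str]]:
        # Called once with every key scheduled during the same event-loop tick,
        # turning N per-parent lookups into one batched query (avoiding N+1).
        print(f"one batched query for parents: {keys}")
        return [[f"peer-of-{key}"] for key in keys]


async def main() -> None:
    loader = PeerLoaderSketch()
    # Both loads below are coalesced into a single batch_load_fn call.
    peers = await asyncio.gather(loader.load("node-1"), loader.load("node-2"))
    print(peers)


asyncio.run(main())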
infrahub/graphql/resolvers/resolver.py CHANGED
@@ -6,13 +6,11 @@ from infrahub_sdk.utils import extract_fields
 
 from infrahub.core.constants import BranchSupportType, InfrahubKind, RelationshipHierarchyDirection
 from infrahub.core.manager import NodeManager
-from infrahub.core.query.node import NodeGetHierarchyQuery
 from infrahub.exceptions import NodeNotFoundError
 
 from ..models import OrderModel
 from ..parser import extract_selection
 from ..permissions import get_permissions
-from ..types import RELATIONS_PROPERTY_MAP, RELATIONS_PROPERTY_MAP_REVERSED
 
 if TYPE_CHECKING:
     from graphql import GraphQLResolveInfo
@@ -217,109 +215,11 @@ async def single_relationship_resolver(parent: dict, info: GraphQLResolveInfo, *
 
 
 async def many_relationship_resolver(
-    parent: dict, info: GraphQLResolveInfo, include_descendants: Optional[bool] = False, **kwargs: Any
+    parent: dict, info: GraphQLResolveInfo, include_descendants: bool | None = False, **kwargs: Any
 ) -> dict[str, Any]:
-    """Resolver for relationships of cardinality=many for Edged responses
-
-    This resolver is used for paginated responses and as such we redefined the requested
-    fields by only reusing information below the 'node' key.
-    """
-    # Extract the InfraHub schema by inspecting the GQL Schema
-    node_schema: NodeSchema = info.parent_type.graphene_type._meta.schema
-
     context: GraphqlContext = info.context
-
-    # Extract the name of the fields in the GQL query
-    fields = await extract_fields(info.field_nodes[0].selection_set)
-    edges = fields.get("edges", {})
-    node_fields = edges.get("node", {})
-    property_fields = edges.get("properties", {})
-    for key, value in property_fields.items():
-        mapped_name = RELATIONS_PROPERTY_MAP[key]
-        node_fields[mapped_name] = value
-
-    # Extract the schema of the node on the other end of the relationship from the GQL Schema
-    node_rel = node_schema.get_relationship(info.field_name)
-
-    # Extract only the filters from the kwargs and prepend the name of the field to the filters
-    offset = kwargs.pop("offset", None)
-    limit = kwargs.pop("limit", None)
-
-    filters = {
-        f"{info.field_name}__{key}": value
-        for key, value in kwargs.items()
-        if "__" in key and value or key in ["id", "ids"]
-    }
-
-    response: dict[str, Any] = {"edges": [], "count": None}
-
-    source_kind = node_schema.kind
-
-    async with context.db.start_session() as db:
-        ids = [parent["id"]]
-        if include_descendants:
-            query = await NodeGetHierarchyQuery.init(
-                db=db,
-                direction=RelationshipHierarchyDirection.DESCENDANTS,
-                node_id=parent["id"],
-                node_schema=node_schema,
-                at=context.at,
-                branch=context.branch,
-            )
-            if node_schema.hierarchy:
-                source_kind = node_schema.hierarchy
-            await query.execute(db=db)
-            descendants_ids = list(query.get_peer_ids())
-            ids.extend(descendants_ids)
-
-        if "count" in fields:
-            response["count"] = await NodeManager.count_peers(
-                db=db,
-                ids=ids,
-                source_kind=source_kind,
-                schema=node_rel,
-                filters=filters,
-                at=context.at,
-                branch=context.branch,
-                branch_agnostic=node_rel.branch is BranchSupportType.AGNOSTIC,
-            )
-
-        if not node_fields:
-            return response
-
-        objs = await NodeManager.query_peers(
-            db=db,
-            ids=ids,
-            source_kind=source_kind,
-            schema=node_rel,
-            filters=filters,
-            fields=node_fields,
-            offset=offset,
-            limit=limit,
-            at=context.at,
-            branch=context.branch,
-            branch_agnostic=node_rel.branch is BranchSupportType.AGNOSTIC,
-            fetch_peers=True,
-        )
-
-        if not objs:
-            return response
-        node_graph = [
-            await obj.to_graphql(db=db, fields=node_fields, related_node_ids=context.related_node_ids) for obj in objs
-        ]
-
-        entries = []
-        for node in node_graph:
-            entry = {"node": {}, "properties": {}}
-            for key, mapped in RELATIONS_PROPERTY_MAP_REVERSED.items():
-                value = node.pop(key, None)
-                if value:
-                    entry["properties"][mapped] = value
-            entry["node"] = node
-            entries.append(entry)
-        response["edges"] = entries
-
-        return response
+    resolver = context.many_relationship_resolver
+    return await resolver.resolve(parent=parent, info=info, include_descendants=include_descendants, **kwargs)
 
 
 async def ancestors_resolver(parent: dict, info: GraphQLResolveInfo, **kwargs) -> dict[str, Any]:
infrahub/graphql/subscription/graphql_query.py CHANGED
@@ -8,6 +8,7 @@ from infrahub.core.constants import InfrahubKind
 from infrahub.core.manager import NodeManager
 from infrahub.core.protocols import CoreGraphQLQuery
 from infrahub.core.timestamp import Timestamp
+from infrahub.graphql.resolvers.many_relationship import ManyRelationshipResolver
 from infrahub.graphql.resolvers.single_relationship import SingleRelationshipResolver
 from infrahub.log import get_logger
 
@@ -48,6 +49,7 @@ async def resolver_graphql_query(
             related_node_ids=set(),
             types=context.types,
             single_relationship_resolver=SingleRelationshipResolver(),
+            many_relationship_resolver=ManyRelationshipResolver(),
         ),
         root_value=None,
         variable_values=params or {},
infrahub_sdk/batch.py CHANGED
@@ -30,7 +30,7 @@ class BatchTaskSync:
         result = None
         try:
             result = self.task(*self.args, **self.kwargs)
-        except Exception as exc:  # pylint: disable=broad-exception-caught
+        except Exception as exc:
            if return_exceptions:
                return self.node, exc
            raise exc
@@ -44,7 +44,7 @@ async def execute_batch_task_in_pool(
    async with semaphore:
        try:
            result = await task.task(*task.args, **task.kwargs)
-        except Exception as exc:  # pylint: disable=broad-exception-caught
+        except Exception as exc:
            if return_exceptions:
                return (task.node, exc)
            raise exc
infrahub_sdk/client.py CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
 import asyncio
 import copy
 import logging
+import time
 from collections.abc import Coroutine, MutableMapping
 from functools import wraps
 from time import sleep
@@ -38,6 +39,7 @@ from .exceptions import (
     NodeNotFoundError,
     ServerNotReachableError,
     ServerNotResponsiveError,
+    URLNotFoundError,
 )
 from .graphql import Mutation, Query
 from .node import (
@@ -878,7 +880,8 @@ class InfrahubClient(BaseClient):
 
         retry = True
         resp = None
-        while retry:
+        start_time = time.time()
+        while retry and time.time() - start_time < self.config.max_retry_duration:
             retry = self.retry_on_failure
             try:
                 resp = await self._post(url=url, payload=payload, headers=headers, timeout=timeout)
@@ -902,6 +905,8 @@ class InfrahubClient(BaseClient):
                 errors = response.get("errors", [])
                 messages = [error.get("message") for error in errors]
                 raise AuthenticationError(" | ".join(messages)) from exc
+            if exc.response.status_code == 404:
+                raise URLNotFoundError(url=url)
 
         if not resp:
             raise Error("Unexpected situation, resp hasn't been initialized.")
@@ -1613,7 +1618,8 @@ class InfrahubClientSync(BaseClient):
 
         retry = True
         resp = None
-        while retry:
+        start_time = time.time()
+        while retry and time.time() - start_time < self.config.max_retry_duration:
             retry = self.retry_on_failure
             try:
                 resp = self._post(url=url, payload=payload, headers=headers, timeout=timeout)
@@ -1637,6 +1643,8 @@ class InfrahubClientSync(BaseClient):
                 errors = response.get("errors", [])
                 messages = [error.get("message") for error in errors]
                 raise AuthenticationError(" | ".join(messages)) from exc
+            if exc.response.status_code == 404:
+                raise URLNotFoundError(url=url)
 
         if not resp:
             raise Error("Unexpected situation, resp hasn't been initialized.")
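Two behavior changes land here: a 404 from the server now raises the new URLNotFoundError instead of being swallowed by the retry loop, and the loop itself is bounded by the max_retry_duration setting added to the config below. A hedged usage sketch; the address and query are placeholders:

import asyncio

from infrahub_sdk import Config, InfrahubClient
from infrahub_sdk.exceptions import URLNotFoundError


async def main() -> None:
    # retry_on_failure keeps re-posting failed requests; the new
    # max_retry_duration (in seconds) puts an upper bound on that loop.
    config = Config(address="http://localhost:8000", retry_on_failure=True, max_retry_duration=60)
    client = InfrahubClient(config=config)
    try:
        await client.execute_graphql(query="query { CoreAccount { count } }")
    except URLNotFoundError as exc:
        # New in this release: a 404 surfaces immediately as a typed error.
        print(f"endpoint not found: {exc}")


asyncio.run(main())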
infrahub_sdk/config.py CHANGED
@@ -56,6 +56,9 @@ class ConfigBase(BaseSettings):
     pagination_size: int = Field(default=50, description="Page size for queries to the server")
     retry_delay: int = Field(default=5, description="Number of seconds to wait until attempting a retry.")
     retry_on_failure: bool = Field(default=False, description="Retry operation in case of failure")
+    max_retry_duration: int = Field(
+        default=300, description="Maximum duration until we stop attempting to retry if enabled."
+    )
     schema_converge_timeout: int = Field(
         default=60, description="Number of seconds to wait for schema to have converged"
     )
@@ -113,7 +116,7 @@ class ConfigBase(BaseSettings):
 
     @model_validator(mode="after")
     def validate_proxy_config(self) -> Self:
-        if self.proxy and self.proxy_mounts.is_set:  # pylint: disable=no-member
+        if self.proxy and self.proxy_mounts.is_set:
             raise ValueError("'proxy' and 'proxy_mounts' are mutually exclusive")
         return self
 
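Because ConfigBase is a pydantic BaseSettings class, the new field should also be settable from the environment. A short sketch, assuming the SDK's usual INFRAHUB_ environment prefix; the value is a placeholder:

import os

from infrahub_sdk import Config

# Assumption: the SDK reads settings with an INFRAHUB_ prefix, as it does
# for INFRAHUB_ADDRESS and similar options.
os.environ["INFRAHUB_MAX_RETRY_DURATION"] = "120"

config = Config(retry_on_failure=True)
print(config.max_retry_duration)  # 120, capping the retry loop shown in client.py above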
infrahub_sdk/ctl/check.py CHANGED
@@ -5,7 +5,7 @@ import sys
 from asyncio import run as aiorun
 from dataclasses import dataclass
 from pathlib import Path
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Optional
 
 import typer
 from rich.console import Console
@@ -50,8 +50,8 @@ def run(
     format_json: bool,
     list_available: bool,
     variables: dict[str, str],
-    name: str | None = None,
-    branch: str | None = None,
+    name: Optional[str] = None,
+    branch: Optional[str] = None,
 ) -> None:
     """Locate and execute all checks under the defined path."""
 
@@ -121,7 +121,7 @@ async def run_check(
     except QueryNotFoundError as exc:
         log.warning(f"{module_name}::{check}: unable to find query ({exc!s})")
         passed = False
-    except Exception as exc:  # pylint: disable=broad-exception-caught
+    except Exception as exc:
         log.warning(f"{module_name}::{check}: An error occurred during execution ({exc})")
         passed = False