infrahub-server 1.3.0a0__py3-none-any.whl → 1.3.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68) hide show
  1. infrahub/core/attribute.py +3 -3
  2. infrahub/core/constants/__init__.py +5 -0
  3. infrahub/core/constants/infrahubkind.py +2 -0
  4. infrahub/core/migrations/query/attribute_rename.py +2 -4
  5. infrahub/core/migrations/query/delete_element_in_schema.py +16 -11
  6. infrahub/core/migrations/query/node_duplicate.py +16 -15
  7. infrahub/core/migrations/query/relationship_duplicate.py +16 -11
  8. infrahub/core/migrations/schema/node_attribute_remove.py +1 -2
  9. infrahub/core/migrations/schema/node_remove.py +16 -13
  10. infrahub/core/node/__init__.py +72 -14
  11. infrahub/core/node/resource_manager/ip_address_pool.py +6 -2
  12. infrahub/core/node/resource_manager/ip_prefix_pool.py +6 -2
  13. infrahub/core/node/resource_manager/number_pool.py +31 -5
  14. infrahub/core/node/standard.py +6 -1
  15. infrahub/core/protocols.py +9 -0
  16. infrahub/core/query/relationship.py +2 -4
  17. infrahub/core/schema/attribute_parameters.py +129 -5
  18. infrahub/core/schema/attribute_schema.py +38 -10
  19. infrahub/core/schema/definitions/core/__init__.py +16 -2
  20. infrahub/core/schema/definitions/core/group.py +45 -0
  21. infrahub/core/schema/definitions/core/resource_pool.py +20 -0
  22. infrahub/core/schema/definitions/internal.py +16 -3
  23. infrahub/core/schema/generated/attribute_schema.py +12 -5
  24. infrahub/core/schema/manager.py +3 -0
  25. infrahub/core/schema/schema_branch.py +55 -0
  26. infrahub/core/validators/__init__.py +8 -0
  27. infrahub/core/validators/attribute/choices.py +0 -1
  28. infrahub/core/validators/attribute/enum.py +0 -1
  29. infrahub/core/validators/attribute/kind.py +0 -1
  30. infrahub/core/validators/attribute/length.py +0 -1
  31. infrahub/core/validators/attribute/min_max.py +118 -0
  32. infrahub/core/validators/attribute/number_pool.py +106 -0
  33. infrahub/core/validators/attribute/optional.py +0 -2
  34. infrahub/core/validators/attribute/regex.py +0 -1
  35. infrahub/core/validators/enum.py +5 -0
  36. infrahub/database/__init__.py +15 -3
  37. infrahub/git/base.py +5 -3
  38. infrahub/git/integrator.py +102 -3
  39. infrahub/graphql/mutations/resource_manager.py +62 -6
  40. infrahub/graphql/queries/resource_manager.py +7 -1
  41. infrahub/graphql/queries/task.py +10 -0
  42. infrahub/graphql/types/task_log.py +3 -2
  43. infrahub/menu/menu.py +3 -3
  44. infrahub/pools/number.py +5 -3
  45. infrahub/task_manager/task.py +44 -4
  46. infrahub/types.py +6 -0
  47. infrahub_sdk/client.py +43 -10
  48. infrahub_sdk/node/__init__.py +39 -0
  49. infrahub_sdk/node/attribute.py +122 -0
  50. infrahub_sdk/node/constants.py +21 -0
  51. infrahub_sdk/{node.py → node/node.py} +50 -749
  52. infrahub_sdk/node/parsers.py +15 -0
  53. infrahub_sdk/node/property.py +24 -0
  54. infrahub_sdk/node/related_node.py +266 -0
  55. infrahub_sdk/node/relationship.py +302 -0
  56. infrahub_sdk/protocols.py +112 -0
  57. infrahub_sdk/protocols_base.py +34 -2
  58. infrahub_sdk/query_groups.py +13 -2
  59. infrahub_sdk/schema/main.py +1 -0
  60. infrahub_sdk/schema/repository.py +16 -0
  61. infrahub_sdk/spec/object.py +1 -1
  62. infrahub_sdk/store.py +1 -1
  63. infrahub_sdk/testing/schemas/car_person.py +1 -0
  64. {infrahub_server-1.3.0a0.dist-info → infrahub_server-1.3.0b1.dist-info}/METADATA +3 -3
  65. {infrahub_server-1.3.0a0.dist-info → infrahub_server-1.3.0b1.dist-info}/RECORD +68 -59
  66. {infrahub_server-1.3.0a0.dist-info → infrahub_server-1.3.0b1.dist-info}/WHEEL +1 -1
  67. {infrahub_server-1.3.0a0.dist-info → infrahub_server-1.3.0b1.dist-info}/LICENSE.txt +0 -0
  68. {infrahub_server-1.3.0a0.dist-info → infrahub_server-1.3.0b1.dist-info}/entry_points.txt +0 -0
@@ -6,15 +6,17 @@ from graphene import Boolean, Field, InputField, InputObjectType, Int, List, Mut
6
6
  from graphene.types.generic import GenericScalar
7
7
  from typing_extensions import Self
8
8
 
9
- from infrahub.core import registry
9
+ from infrahub.core import protocols, registry
10
10
  from infrahub.core.constants import InfrahubKind
11
11
  from infrahub.core.ipam.constants import PrefixMemberType
12
+ from infrahub.core.manager import NodeManager
12
13
  from infrahub.core.schema import NodeSchema
14
+ from infrahub.core.schema.attribute_parameters import NumberAttributeParameters
13
15
  from infrahub.database import retry_db_transaction
14
16
  from infrahub.exceptions import QueryValidationError, SchemaNotFoundError, ValidationError
15
17
 
16
18
  from ..queries.resource_manager import PoolAllocatedNode
17
- from .main import InfrahubMutationMixin, InfrahubMutationOptions
19
+ from .main import DeleteResult, InfrahubMutationMixin, InfrahubMutationOptions
18
20
 
19
21
  if TYPE_CHECKING:
20
22
  from graphql import GraphQLResolveInfo
@@ -177,14 +179,16 @@ class InfrahubNumberPoolMutation(InfrahubMutationMixin, Mutation):
177
179
  database: InfrahubDatabase | None = None, # noqa: ARG003
178
180
  ) -> Any:
179
181
  try:
180
- pool_node = registry.schema.get(name=data["node"].value)
181
- if not pool_node.is_generic_schema and not pool_node.is_node_schema:
182
+ schema_node = registry.schema.get(name=data["node"].value)
183
+ if not schema_node.is_generic_schema and not schema_node.is_node_schema:
182
184
  raise ValidationError(input_value="The selected model is not a Node or a Generic")
183
185
  except SchemaNotFoundError as exc:
184
186
  exc.message = "The selected model does not exist"
185
187
  raise exc
186
188
 
187
- attributes = [attribute for attribute in pool_node.attributes if attribute.name == data["node_attribute"].value]
189
+ attributes = [
190
+ attribute for attribute in schema_node.attributes if attribute.name == data["node_attribute"].value
191
+ ]
188
192
  if not attributes:
189
193
  raise ValidationError(input_value="The selected attribute doesn't exist in the selected model")
190
194
 
@@ -192,9 +196,22 @@ class InfrahubNumberPoolMutation(InfrahubMutationMixin, Mutation):
192
196
  if attribute.kind != "Number":
193
197
  raise ValidationError(input_value="The selected attribute is not of the kind Number")
194
198
 
195
- if data["start_range"].value > data["end_range"].value:
199
+ start_range = data["start_range"].value
200
+ end_range = data["end_range"].value
201
+ if start_range > end_range:
196
202
  raise ValidationError(input_value="start_range can't be larger than end_range")
197
203
 
204
+ if not isinstance(attribute.parameters, NumberAttributeParameters):
205
+ raise ValidationError(
206
+ input_value="The selected attribute parameters are not of the kind NumberAttributeParameters"
207
+ )
208
+
209
+ if attribute.parameters.min_value is not None and start_range < attribute.parameters.min_value:
210
+ raise ValidationError(input_value="start_range can't be less than min_value")
211
+
212
+ if attribute.parameters.max_value is not None and end_range > attribute.parameters.max_value:
213
+ raise ValidationError(input_value="end_range can't be larger than max_value")
214
+
198
215
  return await super().mutate_create(info=info, data=data, branch=branch)
199
216
 
200
217
  @classmethod
@@ -221,3 +238,42 @@ class InfrahubNumberPoolMutation(InfrahubMutationMixin, Mutation):
221
238
  raise ValidationError(input_value="start_range can't be larger than end_range")
222
239
 
223
240
  return number_pool, result
241
+
242
+ @classmethod
243
+ @retry_db_transaction(name="resource_manager_update")
244
+ async def mutate_delete(
245
+ cls,
246
+ info: GraphQLResolveInfo,
247
+ data: InputObjectType,
248
+ branch: Branch,
249
+ ) -> DeleteResult:
250
+ graphql_context: GraphqlContext = info.context
251
+
252
+ number_pool = await NodeManager.find_object(
253
+ db=graphql_context.db,
254
+ kind=protocols.CoreNumberPool,
255
+ id=data.get("id"),
256
+ hfid=data.get("hfid"),
257
+ branch=branch,
258
+ )
259
+
260
+ active_branches = registry.schema.get_branches()
261
+ violating_branches = []
262
+ for active_branch in active_branches:
263
+ try:
264
+ schema = registry.schema.get(name=number_pool.node.value, branch=active_branch)
265
+ except SchemaNotFoundError:
266
+ continue
267
+
268
+ if number_pool.node_attribute.value in schema.attribute_names:
269
+ attribute = schema.get_attribute(name=number_pool.node_attribute.value)
270
+ if attribute.kind == "NumberPool":
271
+ violating_branches.append(active_branch)
272
+
273
+ if violating_branches:
274
+ raise ValidationError(
275
+ input_value=f"Unable to delete number pool {number_pool.node.value}.{number_pool.node_attribute.value}"
276
+ f" is in use (branches: {','.join(violating_branches)})"
277
+ )
278
+
279
+ return await super().mutate_delete(info=info, data=data, branch=branch)
@@ -306,7 +306,13 @@ async def resolve_number_pool_allocation(
306
306
  async def resolve_number_pool_utilization(
307
307
  db: InfrahubDatabase, pool: CoreNode, at: Timestamp | str | None, branch: Branch
308
308
  ) -> dict:
309
- number_pool = NumberUtilizationGetter(db=db, pool=pool, at=at, branch=branch)
309
+ """
310
+ Returns a mapping containing utilization info of a number pool.
311
+ The utilization is calculated as the percentage of the total number of values in the pool that are not excluded for the corresponding attribute.
312
+ """
313
+
314
+ core_number_pool = await registry.manager.get_one_by_id_or_default_filter(db=db, id=pool.id, kind="CoreNumberPool")
315
+ number_pool = NumberUtilizationGetter(db=db, pool=core_number_pool, at=at, branch=branch)
310
316
  await number_pool.load_data()
311
317
 
312
318
  return {
@@ -32,6 +32,8 @@ class Tasks(ObjectType):
32
32
  workflow: list[str] | None = None,
33
33
  related_node__ids: list | None = None,
34
34
  q: str | None = None,
35
+ log_limit: int | None = None,
36
+ log_offset: int | None = None,
35
37
  ) -> dict[str, Any]:
36
38
  related_nodes = related_node__ids or []
37
39
  ids = ids or []
@@ -45,6 +47,8 @@ class Tasks(ObjectType):
45
47
  statuses=state,
46
48
  workflows=workflow,
47
49
  related_nodes=related_nodes,
50
+ log_limit=log_limit,
51
+ log_offset=log_offset,
48
52
  )
49
53
 
50
54
  @staticmethod
@@ -71,6 +75,8 @@ class Tasks(ObjectType):
71
75
  branch: str | None = None,
72
76
  limit: int | None = None,
73
77
  offset: int | None = None,
78
+ log_limit: int | None = None,
79
+ log_offset: int | None = None,
74
80
  ) -> dict[str, Any]:
75
81
  graphql_context: GraphqlContext = info.context
76
82
  fields = await extract_fields_first_node(info)
@@ -87,6 +93,8 @@ class Tasks(ObjectType):
87
93
  related_nodes=related_nodes,
88
94
  limit=limit,
89
95
  offset=offset,
96
+ log_limit=log_limit,
97
+ log_offset=log_offset,
90
98
  )
91
99
  prefect_count = prefect_tasks.get("count", None)
92
100
  return {
@@ -105,6 +113,8 @@ Task = Field(
105
113
  workflow=List(String),
106
114
  ids=List(String),
107
115
  q=String(required=False),
116
+ log_limit=Int(required=False),
117
+ log_offset=Int(required=False),
108
118
  resolver=Tasks.resolve,
109
119
  required=True,
110
120
  )
@@ -1,4 +1,4 @@
1
- from graphene import Field, InputObjectType, List, ObjectType, String
1
+ from graphene import Field, InputObjectType, Int, List, NonNull, ObjectType, String
2
2
  from graphene.types.uuid import UUID
3
3
 
4
4
  from .enums import Severity
@@ -26,4 +26,5 @@ class TaskLogNodes(ObjectType):
26
26
 
27
27
 
28
28
  class TaskLogEdge(ObjectType):
29
- edges = List(TaskLogNodes)
29
+ edges = List(NonNull(TaskLogNodes), required=True)
30
+ count = Int(required=True)
infrahub/menu/menu.py CHANGED
@@ -249,7 +249,7 @@ default_menu = [
249
249
  MenuItemDefinition(
250
250
  namespace="Builtin",
251
251
  name="TriggerDefinition",
252
- label="Triggers",
252
+ label="Events",
253
253
  icon=_extract_node_icon(infrahub_schema.get(InfrahubKind.TRIGGERRULE)),
254
254
  protected=True,
255
255
  section=MenuSection.INTERNAL,
@@ -258,7 +258,7 @@ default_menu = [
258
258
  MenuItemDefinition(
259
259
  namespace="Core",
260
260
  name="TriggerRule",
261
- label="Trigger Rules",
261
+ label="Rules",
262
262
  kind=InfrahubKind.TRIGGERRULE,
263
263
  icon=_extract_node_icon(infrahub_schema.get(InfrahubKind.TRIGGERRULE)),
264
264
  protected=True,
@@ -268,7 +268,7 @@ default_menu = [
268
268
  MenuItemDefinition(
269
269
  namespace="Core",
270
270
  name="Action",
271
- label="Trigger Actions",
271
+ label="Actions",
272
272
  kind=InfrahubKind.ACTION,
273
273
  icon=_extract_node_icon(infrahub_schema.get(InfrahubKind.ACTION)),
274
274
  protected=True,
infrahub/pools/number.py CHANGED
@@ -8,7 +8,7 @@ from infrahub.core.registry import registry
8
8
 
9
9
  if TYPE_CHECKING:
10
10
  from infrahub.core.branch import Branch
11
- from infrahub.core.protocols import CoreNode
11
+ from infrahub.core.node.resource_manager.number_pool import CoreNumberPool
12
12
  from infrahub.core.timestamp import Timestamp
13
13
  from infrahub.database import InfrahubDatabase
14
14
 
@@ -20,7 +20,9 @@ class UsedNumber:
20
20
 
21
21
 
22
22
  class NumberUtilizationGetter:
23
- def __init__(self, db: InfrahubDatabase, pool: CoreNode, branch: Branch, at: Timestamp | str | None = None) -> None:
23
+ def __init__(
24
+ self, db: InfrahubDatabase, pool: CoreNumberPool, branch: Branch, at: Timestamp | str | None = None
25
+ ) -> None:
24
26
  self.db = db
25
27
  self.at = at
26
28
  self.pool = pool
@@ -62,4 +64,4 @@ class NumberUtilizationGetter:
62
64
 
63
65
  @property
64
66
  def total_pool_size(self) -> int:
65
- return self.end_range - self.start_range + 1
67
+ return self.end_range - self.start_range + 1 - self.pool.get_attribute_nb_excluded_values()
@@ -35,6 +35,9 @@ from .models import FlowLogs, FlowProgress, RelatedNodesInfo
35
35
 
36
36
  log = get_logger()
37
37
 
38
+ NB_LOGS_LIMIT = 10_000
39
+ PREFECT_MAX_LOGS_PER_CALL = 200
40
+
38
41
 
39
42
  class PrefectTask:
40
43
  @classmethod
@@ -83,9 +86,42 @@ class PrefectTask:
83
86
  return related_nodes
84
87
 
85
88
  @classmethod
86
- async def _get_logs(cls, client: PrefectClient, flow_ids: list[UUID]) -> FlowLogs:
89
+ async def _get_logs(
90
+ cls, client: PrefectClient, flow_ids: list[UUID], log_limit: int | None, log_offset: int | None
91
+ ) -> FlowLogs:
92
+ """
93
+ Return the logs for a flow run, based on log_limit and log_offset.
94
+ At most, NB_LOGS_LIMIT logs will be returned per flow.
95
+ """
96
+
87
97
  logs_flow = FlowLogs()
88
- all_logs = await client.read_logs(log_filter=LogFilter(flow_run_id=LogFilterFlowRunId(any_=flow_ids)))
98
+
99
+ log_limit = log_limit if log_limit is not None else NB_LOGS_LIMIT
100
+ log_offset = log_offset or 0
101
+ current_offset = log_offset
102
+
103
+ if log_limit > NB_LOGS_LIMIT:
104
+ raise ValueError(f"log_limit cannot be greater than {NB_LOGS_LIMIT}")
105
+
106
+ all_logs = []
107
+
108
+ # Fetch the logs in batches of PREFECT_MAX_LOGS_PER_CALL, as Prefect does not allow fetching more logs at once.
109
+ remaining = min(log_limit, NB_LOGS_LIMIT)
110
+ while remaining > 0:
111
+ batch_limit = min(PREFECT_MAX_LOGS_PER_CALL, remaining)
112
+ logs_batch = await client.read_logs(
113
+ log_filter=LogFilter(flow_run_id=LogFilterFlowRunId(any_=flow_ids)),
114
+ offset=current_offset,
115
+ limit=batch_limit,
116
+ )
117
+ all_logs.extend(logs_batch)
118
+ nb_fetched = len(logs_batch)
119
+ if nb_fetched < batch_limit:
120
+ break # No more logs to fetch
121
+
122
+ current_offset += nb_fetched
123
+ remaining -= nb_fetched
124
+
89
125
  for flow_log in all_logs:
90
126
  if flow_log.flow_run_id and flow_log.message not in ["Finished in state Completed()"]:
91
127
  logs_flow.logs[flow_log.flow_run_id].append(flow_log)
@@ -188,6 +224,8 @@ class PrefectTask:
188
224
  branch: str | None = None,
189
225
  limit: int | None = None,
190
226
  offset: int | None = None,
227
+ log_limit: int | None = None,
228
+ log_offset: int | None = None,
191
229
  ) -> dict[str, Any]:
192
230
  nodes: list[dict] = []
193
231
  count = None
@@ -219,7 +257,9 @@ class PrefectTask:
219
257
  sort=FlowRunSort.START_TIME_DESC,
220
258
  )
221
259
  if log_fields:
222
- logs_flow = await cls._get_logs(client=client, flow_ids=[flow.id for flow in flows])
260
+ logs_flow = await cls._get_logs(
261
+ client=client, flow_ids=[flow.id for flow in flows], log_limit=log_limit, log_offset=log_offset
262
+ )
223
263
 
224
264
  if "progress" in node_fields:
225
265
  progress_flow = await cls._get_progress(client=client, flow_ids=[flow.id for flow in flows])
@@ -265,7 +305,7 @@ class PrefectTask:
265
305
  "updated_at": flow.updated.to_iso8601_string(), # type: ignore
266
306
  "start_time": flow.start_time.to_iso8601_string() if flow.start_time else None,
267
307
  "id": flow.id,
268
- "logs": {"edges": logs},
308
+ "logs": {"edges": logs, "count": len(logs)},
269
309
  }
270
310
  }
271
311
  )
infrahub/types.py CHANGED
@@ -235,6 +235,10 @@ class Number(InfrahubDataType):
235
235
  infrahub = "Integer"
236
236
 
237
237
 
238
+ class NumberPool(Number):
239
+ label: str = "Number Pool"
240
+
241
+
238
242
  class Bandwidth(InfrahubDataType):
239
243
  label: str = "Bandwidth"
240
244
  graphql = graphene.Int
@@ -329,6 +333,7 @@ ATTRIBUTE_TYPES: dict[str, type[InfrahubDataType]] = {
329
333
  "MacAddress": MacAddress,
330
334
  "Color": Color,
331
335
  "Number": Number,
336
+ "NumberPool": NumberPool,
332
337
  "Bandwidth": Bandwidth,
333
338
  "IPHost": IPHost,
334
339
  "IPNetwork": IPNetwork,
@@ -353,6 +358,7 @@ ATTRIBUTE_PYTHON_TYPES: dict[str, type] = {
353
358
  "MacAddress": str, # MAC addresses can be straightforward strings
354
359
  "Color": str, # Colors often represented as hex strings
355
360
  "Number": float, # Numbers can be floats for general use
361
+ "NumberPool": float, # Numbers can be floats for general use
356
362
  "Bandwidth": float, # Bandwidth in some units, represented as a float
357
363
  "IPHost": IPvAnyAddress, # type: ignore[dict-item]
358
364
  "IPNetwork": str,
infrahub_sdk/client.py CHANGED
@@ -172,11 +172,18 @@ class BaseClient:
172
172
  params: dict[str, Any] | None = None,
173
173
  delete_unused_nodes: bool = False,
174
174
  group_type: str | None = None,
175
+ group_params: dict[str, Any] | None = None,
176
+ branch: str | None = None,
175
177
  ) -> Self:
176
178
  self.mode = InfrahubClientMode.TRACKING
177
179
  identifier = identifier or self.identifier or "python-sdk"
178
180
  self.set_context_properties(
179
- identifier=identifier, params=params, delete_unused_nodes=delete_unused_nodes, group_type=group_type
181
+ identifier=identifier,
182
+ params=params,
183
+ delete_unused_nodes=delete_unused_nodes,
184
+ group_type=group_type,
185
+ group_params=group_params,
186
+ branch=branch,
180
187
  )
181
188
  return self
182
189
 
@@ -187,14 +194,22 @@ class BaseClient:
187
194
  delete_unused_nodes: bool = True,
188
195
  reset: bool = True,
189
196
  group_type: str | None = None,
197
+ group_params: dict[str, Any] | None = None,
198
+ branch: str | None = None,
190
199
  ) -> None:
191
200
  if reset:
192
201
  if isinstance(self, InfrahubClient):
193
202
  self.group_context = InfrahubGroupContext(self)
194
203
  elif isinstance(self, InfrahubClientSync):
195
204
  self.group_context = InfrahubGroupContextSync(self)
205
+
196
206
  self.group_context.set_properties(
197
- identifier=identifier, params=params, delete_unused_nodes=delete_unused_nodes, group_type=group_type
207
+ identifier=identifier,
208
+ params=params,
209
+ delete_unused_nodes=delete_unused_nodes,
210
+ group_type=group_type,
211
+ group_params=group_params,
212
+ branch=branch,
198
213
  )
199
214
 
200
215
  def _graphql_url(
@@ -562,18 +577,27 @@ class InfrahubClient(BaseClient):
562
577
  at: Timestamp | None = None,
563
578
  branch: str | None = None,
564
579
  timeout: int | None = None,
580
+ partial_match: bool = False,
565
581
  **kwargs: Any,
566
582
  ) -> int:
567
583
  """Return the number of nodes of a given kind."""
568
- filters = kwargs
569
- schema = await self.schema.get(kind=kind, branch=branch)
584
+ filters: dict[str, Any] = dict(kwargs)
570
585
 
586
+ if partial_match:
587
+ filters["partial_match"] = True
588
+
589
+ schema = await self.schema.get(kind=kind, branch=branch)
571
590
  branch = branch or self.default_branch
572
591
  if at:
573
592
  at = Timestamp(at)
574
593
 
594
+ data: dict[str, Any] = {
595
+ "count": None,
596
+ "@filters": filters,
597
+ }
598
+
575
599
  response = await self.execute_graphql(
576
- query=Query(query={schema.kind: {"count": None, "@filters": filters}}).render(),
600
+ query=Query(query={schema.kind: data}).render(),
577
601
  branch_name=branch,
578
602
  at=at,
579
603
  timeout=timeout,
@@ -801,7 +825,7 @@ class InfrahubClient(BaseClient):
801
825
  nodes = []
802
826
  related_nodes = []
803
827
  batch_process = await self.create_batch()
804
- count = await self.count(kind=schema.kind, **filters)
828
+ count = await self.count(kind=schema.kind, partial_match=partial_match, **filters)
805
829
  total_pages = (count + pagination_size - 1) // pagination_size
806
830
 
807
831
  for page_number in range(1, total_pages + 1):
@@ -1683,18 +1707,27 @@ class InfrahubClientSync(BaseClient):
1683
1707
  at: Timestamp | None = None,
1684
1708
  branch: str | None = None,
1685
1709
  timeout: int | None = None,
1710
+ partial_match: bool = False,
1686
1711
  **kwargs: Any,
1687
1712
  ) -> int:
1688
1713
  """Return the number of nodes of a given kind."""
1689
- filters = kwargs
1690
- schema = self.schema.get(kind=kind, branch=branch)
1714
+ filters: dict[str, Any] = dict(kwargs)
1715
+
1716
+ if partial_match:
1717
+ filters["partial_match"] = True
1691
1718
 
1719
+ schema = self.schema.get(kind=kind, branch=branch)
1692
1720
  branch = branch or self.default_branch
1693
1721
  if at:
1694
1722
  at = Timestamp(at)
1695
1723
 
1724
+ data: dict[str, Any] = {
1725
+ "count": None,
1726
+ "@filters": filters,
1727
+ }
1728
+
1696
1729
  response = self.execute_graphql(
1697
- query=Query(query={schema.kind: {"count": None, "@filters": filters}}).render(),
1730
+ query=Query(query={schema.kind: data}).render(),
1698
1731
  branch_name=branch,
1699
1732
  at=at,
1700
1733
  timeout=timeout,
@@ -1957,7 +1990,7 @@ class InfrahubClientSync(BaseClient):
1957
1990
  related_nodes = []
1958
1991
  batch_process = self.create_batch()
1959
1992
 
1960
- count = self.count(kind=schema.kind, **filters)
1993
+ count = self.count(kind=schema.kind, partial_match=partial_match, **filters)
1961
1994
  total_pages = (count + pagination_size - 1) // pagination_size
1962
1995
 
1963
1996
  for page_number in range(1, total_pages + 1):
@@ -0,0 +1,39 @@
1
+ from __future__ import annotations
2
+
3
+ from .constants import (
4
+ ARTIFACT_DEFINITION_GENERATE_FEATURE_NOT_SUPPORTED_MESSAGE,
5
+ ARTIFACT_FETCH_FEATURE_NOT_SUPPORTED_MESSAGE,
6
+ ARTIFACT_GENERATE_FEATURE_NOT_SUPPORTED_MESSAGE,
7
+ HFID_STR_SEPARATOR,
8
+ IP_TYPES,
9
+ PROPERTIES_FLAG,
10
+ PROPERTIES_OBJECT,
11
+ SAFE_VALUE,
12
+ )
13
+ from .node import InfrahubNode, InfrahubNodeBase, InfrahubNodeSync
14
+ from .parsers import parse_human_friendly_id
15
+ from .property import NodeProperty
16
+ from .related_node import RelatedNode, RelatedNodeBase, RelatedNodeSync
17
+ from .relationship import RelationshipManager, RelationshipManagerBase, RelationshipManagerSync
18
+
19
+ __all__ = [
20
+ "ARTIFACT_DEFINITION_GENERATE_FEATURE_NOT_SUPPORTED_MESSAGE",
21
+ "ARTIFACT_FETCH_FEATURE_NOT_SUPPORTED_MESSAGE",
22
+ "ARTIFACT_GENERATE_FEATURE_NOT_SUPPORTED_MESSAGE",
23
+ "HFID_STR_SEPARATOR",
24
+ "IP_TYPES",
25
+ "PROPERTIES_FLAG",
26
+ "PROPERTIES_OBJECT",
27
+ "SAFE_VALUE",
28
+ "InfrahubNode",
29
+ "InfrahubNodeBase",
30
+ "InfrahubNodeSync",
31
+ "NodeProperty",
32
+ "RelatedNode",
33
+ "RelatedNodeBase",
34
+ "RelatedNodeSync",
35
+ "RelationshipManager",
36
+ "RelationshipManagerBase",
37
+ "RelationshipManagerSync",
38
+ "parse_human_friendly_id",
39
+ ]
@@ -0,0 +1,122 @@
1
+ from __future__ import annotations
2
+
3
+ import ipaddress
4
+ from typing import TYPE_CHECKING, Any, Callable, get_args
5
+
6
+ from ..protocols_base import CoreNodeBase
7
+ from ..uuidt import UUIDT
8
+ from .constants import IP_TYPES, PROPERTIES_FLAG, PROPERTIES_OBJECT, SAFE_VALUE
9
+ from .property import NodeProperty
10
+
11
+ if TYPE_CHECKING:
12
+ from ..schema import AttributeSchemaAPI
13
+
14
+
15
+ class Attribute:
16
+ """Represents an attribute of a Node, including its schema, value, and properties."""
17
+
18
+ def __init__(self, name: str, schema: AttributeSchemaAPI, data: Any | dict):
19
+ """
20
+ Args:
21
+ name (str): The name of the attribute.
22
+ schema (AttributeSchemaAPI): The schema defining the attribute.
23
+ data (Any | dict): The data for the attribute, either in raw form or as a dictionary.
24
+ """
25
+ self.name = name
26
+ self._schema = schema
27
+
28
+ if not isinstance(data, dict) or "value" not in data.keys():
29
+ data = {"value": data}
30
+
31
+ self._properties_flag = PROPERTIES_FLAG
32
+ self._properties_object = PROPERTIES_OBJECT
33
+ self._properties = self._properties_flag + self._properties_object
34
+
35
+ self._read_only = ["updated_at", "is_inherited"]
36
+
37
+ self.id: str | None = data.get("id", None)
38
+
39
+ self._value: Any | None = data.get("value", None)
40
+ self.value_has_been_mutated = False
41
+ self.is_default: bool | None = data.get("is_default", None)
42
+ self.is_from_profile: bool | None = data.get("is_from_profile", None)
43
+
44
+ if self._value:
45
+ value_mapper: dict[str, Callable] = {
46
+ "IPHost": ipaddress.ip_interface,
47
+ "IPNetwork": ipaddress.ip_network,
48
+ }
49
+ mapper = value_mapper.get(schema.kind, lambda value: value)
50
+ self._value = mapper(data.get("value"))
51
+
52
+ self.is_inherited: bool | None = data.get("is_inherited", None)
53
+ self.updated_at: str | None = data.get("updated_at", None)
54
+
55
+ self.is_visible: bool | None = data.get("is_visible", None)
56
+ self.is_protected: bool | None = data.get("is_protected", None)
57
+
58
+ self.source: NodeProperty | None = None
59
+ self.owner: NodeProperty | None = None
60
+
61
+ for prop_name in self._properties_object:
62
+ if data.get(prop_name):
63
+ setattr(self, prop_name, NodeProperty(data=data.get(prop_name))) # type: ignore[arg-type]
64
+
65
+ @property
66
+ def value(self) -> Any:
67
+ return self._value
68
+
69
+ @value.setter
70
+ def value(self, value: Any) -> None:
71
+ self._value = value
72
+ self.value_has_been_mutated = True
73
+
74
+ def _generate_input_data(self) -> dict | None:
75
+ data: dict[str, Any] = {}
76
+ variables: dict[str, Any] = {}
77
+
78
+ if self.value is None:
79
+ return data
80
+
81
+ if isinstance(self.value, str):
82
+ if SAFE_VALUE.match(self.value):
83
+ data["value"] = self.value
84
+ else:
85
+ var_name = f"value_{UUIDT.new().hex}"
86
+ variables[var_name] = self.value
87
+ data["value"] = f"${var_name}"
88
+ elif isinstance(self.value, get_args(IP_TYPES)):
89
+ data["value"] = self.value.with_prefixlen
90
+ elif isinstance(self.value, CoreNodeBase) and self.value.is_resource_pool():
91
+ data["from_pool"] = {"id": self.value.id}
92
+ else:
93
+ data["value"] = self.value
94
+
95
+ for prop_name in self._properties_flag:
96
+ if getattr(self, prop_name) is not None:
97
+ data[prop_name] = getattr(self, prop_name)
98
+
99
+ for prop_name in self._properties_object:
100
+ if getattr(self, prop_name) is not None:
101
+ data[prop_name] = getattr(self, prop_name)._generate_input_data()
102
+
103
+ return {"data": data, "variables": variables}
104
+
105
+ def _generate_query_data(self, property: bool = False) -> dict | None:
106
+ data: dict[str, Any] = {"value": None}
107
+
108
+ if property:
109
+ data.update({"is_default": None, "is_from_profile": None})
110
+
111
+ for prop_name in self._properties_flag:
112
+ data[prop_name] = None
113
+ for prop_name in self._properties_object:
114
+ data[prop_name] = {"id": None, "display_label": None, "__typename": None}
115
+
116
+ return data
117
+
118
+ def _generate_mutation_query(self) -> dict[str, Any]:
119
+ if isinstance(self.value, CoreNodeBase) and self.value.is_resource_pool():
120
+ # If it points to a pool, ask for the value of the pool allocated resource
121
+ return {self.name: {"value": None}}
122
+ return {}
@@ -0,0 +1,21 @@
1
+ import ipaddress
2
+ import re
3
+ from typing import Union
4
+
5
+ PROPERTIES_FLAG = ["is_visible", "is_protected"]
6
+ PROPERTIES_OBJECT = ["source", "owner"]
7
+ SAFE_VALUE = re.compile(r"(^[\. /:a-zA-Z0-9_-]+$)|(^$)")
8
+
9
+ IP_TYPES = Union[ipaddress.IPv4Interface, ipaddress.IPv6Interface, ipaddress.IPv4Network, ipaddress.IPv6Network]
10
+
11
+ ARTIFACT_FETCH_FEATURE_NOT_SUPPORTED_MESSAGE = (
12
+ "calling artifact_fetch is only supported for nodes that are Artifact Definition target"
13
+ )
14
+ ARTIFACT_GENERATE_FEATURE_NOT_SUPPORTED_MESSAGE = (
15
+ "calling artifact_generate is only supported for nodes that are Artifact Definition targets"
16
+ )
17
+ ARTIFACT_DEFINITION_GENERATE_FEATURE_NOT_SUPPORTED_MESSAGE = (
18
+ "calling generate is only supported for CoreArtifactDefinition nodes"
19
+ )
20
+
21
+ HFID_STR_SEPARATOR = "__"