infrahub-server 1.3.0a0__py3-none-any.whl → 1.3.0b2__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (123)
  1. infrahub/actions/tasks.py +4 -11
  2. infrahub/branch/__init__.py +0 -0
  3. infrahub/branch/tasks.py +29 -0
  4. infrahub/branch/triggers.py +22 -0
  5. infrahub/cli/db.py +2 -2
  6. infrahub/computed_attribute/gather.py +3 -1
  7. infrahub/computed_attribute/tasks.py +23 -29
  8. infrahub/core/attribute.py +3 -3
  9. infrahub/core/constants/__init__.py +10 -0
  10. infrahub/core/constants/database.py +1 -0
  11. infrahub/core/constants/infrahubkind.py +2 -0
  12. infrahub/core/convert_object_type/conversion.py +1 -1
  13. infrahub/core/diff/query/save.py +67 -40
  14. infrahub/core/diff/query/time_range_query.py +0 -1
  15. infrahub/core/graph/__init__.py +1 -1
  16. infrahub/core/migrations/graph/__init__.py +6 -0
  17. infrahub/core/migrations/graph/m013_convert_git_password_credential.py +0 -2
  18. infrahub/core/migrations/graph/m029_duplicates_cleanup.py +662 -0
  19. infrahub/core/migrations/graph/m030_illegal_edges.py +82 -0
  20. infrahub/core/migrations/query/attribute_add.py +13 -9
  21. infrahub/core/migrations/query/attribute_rename.py +2 -4
  22. infrahub/core/migrations/query/delete_element_in_schema.py +16 -11
  23. infrahub/core/migrations/query/node_duplicate.py +16 -15
  24. infrahub/core/migrations/query/relationship_duplicate.py +16 -12
  25. infrahub/core/migrations/schema/node_attribute_remove.py +1 -2
  26. infrahub/core/migrations/schema/node_remove.py +16 -14
  27. infrahub/core/node/__init__.py +74 -14
  28. infrahub/core/node/base.py +1 -1
  29. infrahub/core/node/resource_manager/ip_address_pool.py +6 -2
  30. infrahub/core/node/resource_manager/ip_prefix_pool.py +6 -2
  31. infrahub/core/node/resource_manager/number_pool.py +31 -5
  32. infrahub/core/node/standard.py +6 -1
  33. infrahub/core/path.py +1 -1
  34. infrahub/core/protocols.py +10 -0
  35. infrahub/core/query/node.py +1 -1
  36. infrahub/core/query/relationship.py +4 -6
  37. infrahub/core/query/standard_node.py +19 -5
  38. infrahub/core/relationship/constraints/peer_relatives.py +72 -0
  39. infrahub/core/relationship/model.py +1 -1
  40. infrahub/core/schema/attribute_parameters.py +129 -5
  41. infrahub/core/schema/attribute_schema.py +62 -14
  42. infrahub/core/schema/basenode_schema.py +2 -2
  43. infrahub/core/schema/definitions/core/__init__.py +16 -2
  44. infrahub/core/schema/definitions/core/group.py +45 -0
  45. infrahub/core/schema/definitions/core/resource_pool.py +29 -0
  46. infrahub/core/schema/definitions/internal.py +25 -4
  47. infrahub/core/schema/generated/attribute_schema.py +12 -5
  48. infrahub/core/schema/generated/relationship_schema.py +6 -1
  49. infrahub/core/schema/manager.py +7 -2
  50. infrahub/core/schema/schema_branch.py +69 -5
  51. infrahub/core/validators/__init__.py +8 -0
  52. infrahub/core/validators/attribute/choices.py +0 -1
  53. infrahub/core/validators/attribute/enum.py +0 -1
  54. infrahub/core/validators/attribute/kind.py +0 -1
  55. infrahub/core/validators/attribute/length.py +0 -1
  56. infrahub/core/validators/attribute/min_max.py +118 -0
  57. infrahub/core/validators/attribute/number_pool.py +106 -0
  58. infrahub/core/validators/attribute/optional.py +0 -2
  59. infrahub/core/validators/attribute/regex.py +0 -1
  60. infrahub/core/validators/enum.py +5 -0
  61. infrahub/core/validators/tasks.py +1 -1
  62. infrahub/database/__init__.py +16 -4
  63. infrahub/database/validation.py +100 -0
  64. infrahub/dependencies/builder/constraint/grouped/node_runner.py +2 -0
  65. infrahub/dependencies/builder/constraint/relationship_manager/peer_relatives.py +8 -0
  66. infrahub/dependencies/builder/diff/deserializer.py +1 -1
  67. infrahub/dependencies/registry.py +2 -0
  68. infrahub/events/models.py +1 -1
  69. infrahub/git/base.py +5 -3
  70. infrahub/git/integrator.py +102 -3
  71. infrahub/graphql/mutations/main.py +1 -1
  72. infrahub/graphql/mutations/resource_manager.py +54 -6
  73. infrahub/graphql/queries/resource_manager.py +7 -1
  74. infrahub/graphql/queries/task.py +10 -0
  75. infrahub/graphql/resolvers/many_relationship.py +1 -1
  76. infrahub/graphql/resolvers/resolver.py +2 -2
  77. infrahub/graphql/resolvers/single_relationship.py +1 -1
  78. infrahub/graphql/types/task_log.py +3 -2
  79. infrahub/menu/menu.py +8 -7
  80. infrahub/message_bus/operations/refresh/registry.py +3 -3
  81. infrahub/patch/queries/delete_duplicated_edges.py +40 -29
  82. infrahub/pools/number.py +5 -3
  83. infrahub/pools/registration.py +22 -0
  84. infrahub/pools/tasks.py +56 -0
  85. infrahub/schema/__init__.py +0 -0
  86. infrahub/schema/tasks.py +27 -0
  87. infrahub/schema/triggers.py +23 -0
  88. infrahub/task_manager/task.py +44 -4
  89. infrahub/trigger/catalogue.py +4 -0
  90. infrahub/trigger/models.py +5 -4
  91. infrahub/trigger/setup.py +26 -2
  92. infrahub/trigger/tasks.py +1 -1
  93. infrahub/types.py +6 -0
  94. infrahub/webhook/tasks.py +6 -9
  95. infrahub/workflows/catalogue.py +27 -1
  96. infrahub_sdk/client.py +43 -10
  97. infrahub_sdk/node/__init__.py +39 -0
  98. infrahub_sdk/node/attribute.py +122 -0
  99. infrahub_sdk/node/constants.py +21 -0
  100. infrahub_sdk/{node.py → node/node.py} +50 -749
  101. infrahub_sdk/node/parsers.py +15 -0
  102. infrahub_sdk/node/property.py +24 -0
  103. infrahub_sdk/node/related_node.py +266 -0
  104. infrahub_sdk/node/relationship.py +302 -0
  105. infrahub_sdk/protocols.py +112 -0
  106. infrahub_sdk/protocols_base.py +34 -2
  107. infrahub_sdk/query_groups.py +13 -2
  108. infrahub_sdk/schema/main.py +1 -0
  109. infrahub_sdk/schema/repository.py +16 -0
  110. infrahub_sdk/spec/object.py +1 -1
  111. infrahub_sdk/store.py +1 -1
  112. infrahub_sdk/testing/schemas/car_person.py +1 -0
  113. {infrahub_server-1.3.0a0.dist-info → infrahub_server-1.3.0b2.dist-info}/METADATA +3 -3
  114. {infrahub_server-1.3.0a0.dist-info → infrahub_server-1.3.0b2.dist-info}/RECORD +122 -100
  115. {infrahub_server-1.3.0a0.dist-info → infrahub_server-1.3.0b2.dist-info}/WHEEL +1 -1
  116. infrahub_testcontainers/container.py +239 -64
  117. infrahub_testcontainers/docker-compose-cluster.test.yml +321 -0
  118. infrahub_testcontainers/docker-compose.test.yml +1 -0
  119. infrahub_testcontainers/helpers.py +15 -1
  120. infrahub_testcontainers/plugin.py +9 -0
  121. infrahub/patch/queries/consolidate_duplicated_nodes.py +0 -106
  122. {infrahub_server-1.3.0a0.dist-info → infrahub_server-1.3.0b2.dist-info}/LICENSE.txt +0 -0
  123. {infrahub_server-1.3.0a0.dist-info → infrahub_server-1.3.0b2.dist-info}/entry_points.txt +0 -0
infrahub/pools/number.py CHANGED
@@ -8,7 +8,7 @@ from infrahub.core.registry import registry

 if TYPE_CHECKING:
     from infrahub.core.branch import Branch
-    from infrahub.core.protocols import CoreNode
+    from infrahub.core.node.resource_manager.number_pool import CoreNumberPool
     from infrahub.core.timestamp import Timestamp
     from infrahub.database import InfrahubDatabase

@@ -20,7 +20,9 @@ class UsedNumber:


 class NumberUtilizationGetter:
-    def __init__(self, db: InfrahubDatabase, pool: CoreNode, branch: Branch, at: Timestamp | str | None = None) -> None:
+    def __init__(
+        self, db: InfrahubDatabase, pool: CoreNumberPool, branch: Branch, at: Timestamp | str | None = None
+    ) -> None:
         self.db = db
         self.at = at
         self.pool = pool
@@ -62,4 +64,4 @@ class NumberUtilizationGetter:

     @property
     def total_pool_size(self) -> int:
-        return self.end_range - self.start_range + 1
+        return self.end_range - self.start_range + 1 - self.pool.get_attribute_nb_excluded_values()
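
Note: with this change, a pool reports its capacity net of excluded values. A minimal sketch of the adjusted arithmetic, assuming get_attribute_nb_excluded_values() simply counts the values excluded through the pool attribute's parameters (the helper below is illustrative, not the CoreNumberPool implementation):

# Illustrative only: CoreNumberPool.get_attribute_nb_excluded_values() is
# modeled here as len(excluded_values).
def total_pool_size(start_range: int, end_range: int, excluded_values: set[int]) -> int:
    # end - start + 1 counts both endpoints; excluded values shrink the usable pool.
    return end_range - start_range + 1 - len(excluded_values)

assert total_pool_size(1, 10, excluded_values=set()) == 10
assert total_pool_size(1, 10, excluded_values={3, 7}) == 8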
infrahub/pools/registration.py ADDED
@@ -0,0 +1,22 @@
+from infrahub.core.registry import registry
+from infrahub.exceptions import SchemaNotFoundError
+
+
+def get_branches_with_schema_number_pool(kind: str, attribute_name: str) -> list[str]:
+    """Return branches where schema defined NumberPool exists"""
+
+    registered_branches = []
+    active_branches = registry.schema.get_branches()
+
+    for active_branch in active_branches:
+        try:
+            schema = registry.schema.get(name=kind, branch=active_branch)
+        except SchemaNotFoundError:
+            continue
+
+        if attribute_name in schema.attribute_names:
+            attribute = schema.get_attribute(name=attribute_name)
+            if attribute.kind == "NumberPool":
+                registered_branches.append(active_branch)
+
+    return registered_branches
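
Note: a hedged usage sketch of the helper above; the kind and attribute name are hypothetical placeholders, not values taken from this release:

from infrahub.pools.registration import get_branches_with_schema_number_pool

# Hypothetical schema: a node kind "InfraDevice" with an "asn" attribute of kind NumberPool.
branches = get_branches_with_schema_number_pool(kind="InfraDevice", attribute_name="asn")
if not branches:
    print("No active branch defines this NumberPool; the pool node is an orphan.")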
infrahub/pools/tasks.py ADDED
@@ -0,0 +1,56 @@
+from __future__ import annotations
+
+from prefect import flow
+from prefect.logging import get_run_logger
+
+from infrahub.context import InfrahubContext  # noqa: TC001 needed for prefect flow
+from infrahub.core.constants import NumberPoolType
+from infrahub.core.manager import NodeManager
+from infrahub.core.protocols import CoreNumberPool
+from infrahub.core.registry import registry
+from infrahub.core.schema.attribute_parameters import NumberPoolParameters
+from infrahub.pools.registration import get_branches_with_schema_number_pool
+from infrahub.services import InfrahubServices  # noqa: TC001 needed for prefect flow
+
+
+@flow(
+    name="validate-schema-number-pools",
+    flow_run_name="Validate schema number pools on {branch_name}",
+)
+async def validate_schema_number_pools(
+    branch_name: str,  # noqa: ARG001
+    context: InfrahubContext,  # noqa: ARG001
+    service: InfrahubServices,
+) -> None:
+    log = get_run_logger()
+
+    async with service.database.start_session() as dbs:
+        schema_number_pools = await NodeManager.query(
+            db=dbs, schema=CoreNumberPool, filters={"pool_type__value": NumberPoolType.SCHEMA.value}
+        )
+
+    for schema_number_pool in list(schema_number_pools):
+        defined_on_branches = get_branches_with_schema_number_pool(
+            kind=schema_number_pool.node.value, attribute_name=schema_number_pool.node_attribute.value
+        )
+        if registry.default_branch in defined_on_branches:
+            schema = registry.schema.get(name=schema_number_pool.node.value, branch=registry.default_branch)
+            attribute = schema.get_attribute(name=schema_number_pool.node_attribute.value)
+            number_pool_updated = False
+            if isinstance(attribute.parameters, NumberPoolParameters):
+                if schema_number_pool.start_range.value != attribute.parameters.start_range:
+                    schema_number_pool.start_range.value = attribute.parameters.start_range
+                    number_pool_updated = True
+                if schema_number_pool.end_range.value != attribute.parameters.end_range:
+                    schema_number_pool.end_range.value = attribute.parameters.end_range
+                    number_pool_updated = True
+
+            if number_pool_updated:
+                log.info(
+                    f"Updating NumberPool={schema_number_pool.id} based on changes in the schema on {registry.default_branch}"
+                )
+                await schema_number_pool.save(db=service.database)
+
+        elif not defined_on_branches:
+            log.info(f"Deleting number pool (id={schema_number_pool.id}) as it is no longer defined in the schema")
+            await schema_number_pool.delete(db=service.database)
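
Note: the flow's reconciliation reduces to three cases per schema-defined pool. A condensed, pure-function sketch of that branching (stand-in types; not the Infrahub API):

def reconcile(defined_on_branches: list[str], default_branch: str) -> str:
    # Mirrors the if/elif structure of validate_schema_number_pools above.
    if default_branch in defined_on_branches:
        return "sync-ranges"  # copy start/end range from the default-branch schema
    if not defined_on_branches:
        return "delete-pool"  # the NumberPool is no longer defined anywhere
    return "keep"  # defined only on non-default branches: leave untouched

assert reconcile(["main", "feature-1"], "main") == "sync-ranges"
assert reconcile([], "main") == "delete-pool"
assert reconcile(["feature-1"], "main") == "keep"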
infrahub/schema/__init__.py ADDED
File without changes (new empty package marker)
infrahub/schema/tasks.py ADDED
@@ -0,0 +1,27 @@
+from __future__ import annotations
+
+from prefect import flow
+from prefect.logging import get_run_logger
+
+from infrahub.context import InfrahubContext  # noqa: TC001 needed for prefect flow
+from infrahub.pools.tasks import validate_schema_number_pools
+from infrahub.services import InfrahubServices  # noqa: TC001 needed for prefect flow
+from infrahub.workflows.utils import wait_for_schema_to_converge
+
+
+@flow(
+    name="schema-updated",
+    flow_run_name="Running actions after the schema was updated on '{branch_name}'",
+)
+async def schema_updated(
+    branch_name: str,
+    schema_hash: str,  # noqa: ARG001
+    context: InfrahubContext,
+    service: InfrahubServices,
+) -> None:
+    log = get_run_logger()
+    await wait_for_schema_to_converge(
+        branch_name=branch_name, component=service.component, db=service.database, log=log
+    )
+
+    await validate_schema_number_pools(branch_name=branch_name, context=context, service=service)
infrahub/schema/triggers.py ADDED
@@ -0,0 +1,23 @@
+from infrahub.events.schema_action import SchemaUpdatedEvent
+from infrahub.trigger.models import BuiltinTriggerDefinition, EventTrigger, ExecuteWorkflow
+from infrahub.workflows.catalogue import SCHEMA_UPDATED
+
+TRIGGER_SCHEMA_UPDATED = BuiltinTriggerDefinition(
+    name="schema-updated-trigger",
+    trigger=EventTrigger(
+        events={SchemaUpdatedEvent.event_name},
+    ),
+    actions=[
+        ExecuteWorkflow(
+            workflow=SCHEMA_UPDATED,
+            parameters={
+                "branch_name": "{{ event.payload['data']['branch_name'] }}",
+                "schema_hash": "{{ event.payload['data']['schema_hash'] }}",
+                "context": {
+                    "__prefect_kind": "json",
+                    "value": {"__prefect_kind": "jinja", "template": "{{ event.payload['context'] | tojson }}"},
+                },
+            },
+        ),
+    ],
+)
infrahub/task_manager/task.py CHANGED
@@ -35,6 +35,9 @@ from .models import FlowLogs, FlowProgress, RelatedNodesInfo

 log = get_logger()

+NB_LOGS_LIMIT = 10_000
+PREFECT_MAX_LOGS_PER_CALL = 200
+

 class PrefectTask:
     @classmethod
@@ -83,9 +86,42 @@ class PrefectTask:
         return related_nodes

     @classmethod
-    async def _get_logs(cls, client: PrefectClient, flow_ids: list[UUID]) -> FlowLogs:
+    async def _get_logs(
+        cls, client: PrefectClient, flow_ids: list[UUID], log_limit: int | None, log_offset: int | None
+    ) -> FlowLogs:
+        """
+        Return the logs for a flow run, based on log_limit and log_offset.
+        At most, NB_LOGS_LIMIT logs will be returned per flow.
+        """
+
         logs_flow = FlowLogs()
-        all_logs = await client.read_logs(log_filter=LogFilter(flow_run_id=LogFilterFlowRunId(any_=flow_ids)))
+
+        log_limit = log_limit if log_limit is not None else NB_LOGS_LIMIT
+        log_offset = log_offset or 0
+        current_offset = log_offset
+
+        if log_limit > NB_LOGS_LIMIT:
+            raise ValueError(f"log_limit cannot be greater than {NB_LOGS_LIMIT}")
+
+        all_logs = []
+
+        # Fetch the logs in batches of PREFECT_MAX_LOGS_PER_CALL, as prefect does not allow to fetch more logs at once.
+        remaining = min(log_limit, NB_LOGS_LIMIT)
+        while remaining > 0:
+            batch_limit = min(PREFECT_MAX_LOGS_PER_CALL, remaining)
+            logs_batch = await client.read_logs(
+                log_filter=LogFilter(flow_run_id=LogFilterFlowRunId(any_=flow_ids)),
+                offset=current_offset,
+                limit=batch_limit,
+            )
+            all_logs.extend(logs_batch)
+            nb_fetched = len(logs_batch)
+            if nb_fetched < batch_limit:
+                break  # No more logs to fetch
+
+            current_offset += nb_fetched
+            remaining -= nb_fetched
+
         for flow_log in all_logs:
             if flow_log.flow_run_id and flow_log.message not in ["Finished in state Completed()"]:
                 logs_flow.logs[flow_log.flow_run_id].append(flow_log)
@@ -188,6 +224,8 @@
         branch: str | None = None,
         limit: int | None = None,
         offset: int | None = None,
+        log_limit: int | None = None,
+        log_offset: int | None = None,
     ) -> dict[str, Any]:
         nodes: list[dict] = []
         count = None
@@ -219,7 +257,9 @@
             sort=FlowRunSort.START_TIME_DESC,
         )
         if log_fields:
-            logs_flow = await cls._get_logs(client=client, flow_ids=[flow.id for flow in flows])
+            logs_flow = await cls._get_logs(
+                client=client, flow_ids=[flow.id for flow in flows], log_limit=log_limit, log_offset=log_offset
+            )

         if "progress" in node_fields:
             progress_flow = await cls._get_progress(client=client, flow_ids=[flow.id for flow in flows])
@@ -265,7 +305,7 @@
                     "updated_at": flow.updated.to_iso8601_string(),  # type: ignore
                     "start_time": flow.start_time.to_iso8601_string() if flow.start_time else None,
                     "id": flow.id,
-                    "logs": {"edges": logs},
+                    "logs": {"edges": logs, "count": len(logs)},
                 }
            }
        )
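
Note: the loop in _get_logs is a standard limit/offset pagination pattern, capped client-side because the Prefect API is read in pages of PREFECT_MAX_LOGS_PER_CALL (200) here. A self-contained sketch of the same pattern with a stubbed page fetcher (names are illustrative, not the Prefect API):

from typing import Callable

# Generic limit/offset batching, mirroring the while loop in _get_logs above.
def fetch_batched(fetch_page: Callable[[int, int], list], total_limit: int, page_size: int, start_offset: int = 0) -> list:
    results: list = []
    offset, remaining = start_offset, total_limit
    while remaining > 0:
        batch = fetch_page(offset, min(page_size, remaining))
        results.extend(batch)
        if len(batch) < min(page_size, remaining):
            break  # short page: the server has no more rows
        offset += len(batch)
        remaining -= len(batch)
    return results

# Usage against an in-memory "server" of 450 rows, fetched 200 at a time:
rows = list(range(450))

def page(offset: int, limit: int) -> list[int]:
    return rows[offset : offset + limit]

assert len(fetch_batched(page, total_limit=1000, page_size=200)) == 450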
infrahub/trigger/catalogue.py CHANGED
@@ -1,15 +1,19 @@
 from infrahub.actions.triggers import TRIGGER_ACTION_RULE_UPDATE
+from infrahub.branch.triggers import TRIGGER_BRANCH_MERGED
 from infrahub.computed_attribute.triggers import (
     TRIGGER_COMPUTED_ATTRIBUTE_ALL_SCHEMA,
     TRIGGER_COMPUTED_ATTRIBUTE_PYTHON_SETUP_COMMIT,
 )
+from infrahub.schema.triggers import TRIGGER_SCHEMA_UPDATED
 from infrahub.trigger.models import TriggerDefinition
 from infrahub.webhook.triggers import TRIGGER_WEBHOOK_DELETE, TRIGGER_WEBHOOK_SETUP_UPDATE

 builtin_triggers: list[TriggerDefinition] = [
     TRIGGER_ACTION_RULE_UPDATE,
+    TRIGGER_BRANCH_MERGED,
     TRIGGER_COMPUTED_ATTRIBUTE_ALL_SCHEMA,
     TRIGGER_COMPUTED_ATTRIBUTE_PYTHON_SETUP_COMMIT,
+    TRIGGER_SCHEMA_UPDATED,
     TRIGGER_WEBHOOK_DELETE,
     TRIGGER_WEBHOOK_SETUP_UPDATE,
 ]
infrahub/trigger/models.py CHANGED
@@ -5,10 +5,7 @@ from enum import Enum
 from typing import TYPE_CHECKING, Any

 from prefect.events.actions import RunDeployment
-from prefect.events.schemas.automations import (
-    Automation,  # noqa: TC002
-    Posture,
-)
+from prefect.events.schemas.automations import Automation, Posture
 from prefect.events.schemas.automations import EventTrigger as PrefectEventTrigger
 from prefect.events.schemas.events import ResourceSpecification
 from pydantic import BaseModel, Field
@@ -28,6 +25,10 @@ class TriggerSetupReport(BaseModel):
     deleted: list[Automation] = Field(default_factory=list)
     unchanged: list[TriggerDefinition] = Field(default_factory=list)

+    @property
+    def in_use_count(self) -> int:
+        return len(self.created + self.updated + self.unchanged)
+

 class TriggerType(str, Enum):
     ACTION_TRIGGER_RULE = "action_trigger_rule"
infrahub/trigger/setup.py CHANGED
@@ -1,12 +1,14 @@
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Awaitable, Callable

 from prefect import get_run_logger, task
 from prefect.automations import AutomationCore
 from prefect.cache_policies import NONE
-from prefect.client.orchestration import PrefectClient
+from prefect.client.orchestration import PrefectClient, get_client
 from prefect.client.schemas.filters import DeploymentFilter, DeploymentFilterName
 from prefect.events.schemas.automations import Automation

+from infrahub import lock
+from infrahub.database import InfrahubDatabase
 from infrahub.trigger.models import TriggerDefinition

 from .models import TriggerSetupReport, TriggerType
@@ -27,6 +29,28 @@ def compare_automations(target: AutomationCore, existing: Automation) -> bool:
     return target_dump == existing_dump


+@task(name="trigger-setup-specific", task_run_name="Setup triggers of a specific kind", cache_policy=NONE)  # type: ignore[arg-type]
+async def setup_triggers_specific(
+    gatherer: Callable[[InfrahubDatabase | None], Awaitable[list[TriggerDefinition]]],
+    trigger_type: TriggerType,
+    db: InfrahubDatabase | None = None,
+) -> TriggerSetupReport:
+    async with lock.registry.get(
+        name=f"configure-action-rules-{trigger_type.value}", namespace="trigger-rules", local=False
+    ):
+        if db:
+            async with db.start_session(read_only=True) as dbs:
+                triggers = await gatherer(dbs)
+        else:
+            triggers = await gatherer(db)
+        async with get_client(sync_client=False) as prefect_client:
+            return await setup_triggers(
+                client=prefect_client,
+                triggers=triggers,
+                trigger_type=trigger_type,
+            )  # type: ignore[misc]
+
+
 @task(name="trigger-setup", task_run_name="Setup triggers", cache_policy=NONE)  # type: ignore[arg-type]
 async def setup_triggers(
     client: PrefectClient,
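
Note: setup_triggers_specific accepts the gatherer as an async callable so every trigger kind can share one locked setup path instead of duplicating it per module (see the webhook refactor below). A minimal standalone sketch of that callable-injection pattern, with simplified types:

import asyncio
from typing import Awaitable, Callable

async def gather_example_triggers(source: str | None) -> list[str]:
    # Stand-in for gatherers like gather_trigger_webhook.
    return [f"{source or 'default'}-trigger"]

async def setup_specific(
    gatherer: Callable[[str | None], Awaitable[list[str]]], source: str | None = None
) -> None:
    triggers = await gatherer(source)  # the callable decides how its definitions are loaded
    print(f"configuring {len(triggers)} trigger(s)")

asyncio.run(setup_specific(gather_example_triggers, source="webhook"))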
infrahub/trigger/tasks.py CHANGED
@@ -6,7 +6,7 @@ from infrahub.computed_attribute.gather import (
     gather_trigger_computed_attribute_jinja2,
     gather_trigger_computed_attribute_python,
 )
-from infrahub.services import InfrahubServices  # noqa: TC001 needed for prefect flow
+from infrahub.services import InfrahubServices
 from infrahub.trigger.catalogue import builtin_triggers
 from infrahub.webhook.gather import gather_trigger_webhook

infrahub/types.py CHANGED
@@ -235,6 +235,10 @@ class Number(InfrahubDataType):
     infrahub = "Integer"


+class NumberPool(Number):
+    label: str = "Number Pool"
+
+
 class Bandwidth(InfrahubDataType):
     label: str = "Bandwidth"
     graphql = graphene.Int
@@ -329,6 +333,7 @@ ATTRIBUTE_TYPES: dict[str, type[InfrahubDataType]] = {
     "MacAddress": MacAddress,
     "Color": Color,
     "Number": Number,
+    "NumberPool": NumberPool,
     "Bandwidth": Bandwidth,
     "IPHost": IPHost,
     "IPNetwork": IPNetwork,
@@ -353,6 +358,7 @@ ATTRIBUTE_PYTHON_TYPES: dict[str, type] = {
     "MacAddress": str,  # MAC addresses can be straightforward strings
     "Color": str,  # Colors often represented as hex strings
     "Number": float,  # Numbers can be floats for general use
+    "NumberPool": float,  # Numbers can be floats for general use
     "Bandwidth": float,  # Bandwidth in some units, represented as a float
     "IPHost": IPvAnyAddress,  # type: ignore[dict-item]
     "IPNetwork": str,
infrahub/webhook/tasks.py CHANGED
@@ -14,7 +14,7 @@ from prefect.logging import get_run_logger
 from infrahub.message_bus.types import KVTTL
 from infrahub.services import InfrahubServices  # noqa: TC001 needed for prefect flow
 from infrahub.trigger.models import TriggerType
-from infrahub.trigger.setup import setup_triggers
+from infrahub.trigger.setup import setup_triggers_specific
 from infrahub.workflows.utils import add_tags

 from .gather import gather_trigger_webhook
@@ -111,16 +111,13 @@
 async def configure_webhook_all(service: InfrahubServices) -> None:
     log = get_run_logger()

-    triggers = await gather_trigger_webhook(db=service.database)
-
-    async with get_client(sync_client=False) as prefect_client:
-        await setup_triggers(
-            client=prefect_client,
-            triggers=triggers,
-            trigger_type=TriggerType.WEBHOOK,
-        )  # type: ignore[misc]
+    async with service.database.start_session(read_only=True) as db:
+        triggers = await gather_trigger_webhook(db=db)

     log.info(f"{len(triggers)} Webhooks automation configuration completed")
+    await setup_triggers_specific(
+        gatherer=gather_trigger_webhook, db=service.database, trigger_type=TriggerType.WEBHOOK
+    )  # type: ignore[misc]


 @flow(name="webhook-setup-automation-one", flow_run_name="Configurate webhook for {webhook_name}")
infrahub/workflows/catalogue.py CHANGED
@@ -242,6 +242,14 @@ BRANCH_MERGE = WorkflowDefinition(
     tags=[WorkflowTag.DATABASE_CHANGE],
 )

+BRANCH_MERGED = WorkflowDefinition(
+    name="branch-merged",
+    type=WorkflowType.CORE,
+    module="infrahub.branch.tasks",
+    function="branch_merged",
+    tags=[WorkflowTag.DATABASE_CHANGE],
+)
+
 BRANCH_MERGE_POST_PROCESS = WorkflowDefinition(
     name="branch-merge-post-process",
     type=WorkflowType.CORE,
@@ -250,7 +258,6 @@ BRANCH_MERGE_POST_PROCESS = WorkflowDefinition(
     tags=[WorkflowTag.DATABASE_CHANGE],
 )

-
 BRANCH_MERGE_MUTATION = WorkflowDefinition(
     name="merge-branch-mutation",
     type=WorkflowType.CORE,
@@ -498,6 +505,14 @@ GIT_REPOSITORY_MERGE_CONFLICTS_CHECKS_RUN = WorkflowDefinition(
     function="run_check_merge_conflicts",
 )

+SCHEMA_UPDATED = WorkflowDefinition(
+    name="schema-updated",
+    type=WorkflowType.CORE,
+    module="infrahub.schema.tasks",
+    function="schema_updated",
+)
+
+
 TRIGGER_CONFIGURE_ALL = WorkflowDefinition(
     name="trigger-configure-all",
     type=WorkflowType.CORE,
@@ -506,6 +521,14 @@ TRIGGER_CONFIGURE_ALL = WorkflowDefinition(
 )


+VALIDATE_SCHEMA_NUMBER_POOLS = WorkflowDefinition(
+    name="validate-schema-number-pools",
+    type=WorkflowType.CORE,
+    module="infrahub.pools.tasks",
+    function="validate_schema_number_pools",
+)
+
+
 worker_pools = [INFRAHUB_WORKER_POOL]

 workflows = [
@@ -517,6 +540,7 @@ workflows = [
     BRANCH_CREATE,
     BRANCH_DELETE,
     BRANCH_MERGE,
+    BRANCH_MERGED,
     BRANCH_MERGE_MUTATION,
     BRANCH_MERGE_POST_PROCESS,
     BRANCH_REBASE,
@@ -564,6 +588,7 @@ workflows = [
     REQUEST_PROPOSED_CHANGE_USER_TESTS,
     RUN_GENERATOR_AS_CHECK,
     SCHEMA_APPLY_MIGRATION,
+    SCHEMA_UPDATED,
     SCHEMA_VALIDATE_MIGRATION,
     TRANSFORM_JINJA2_RENDER,
     TRANSFORM_PYTHON_RENDER,
@@ -572,6 +597,7 @@ workflows = [
     TRIGGER_GENERATOR_DEFINITION_RUN,
     TRIGGER_UPDATE_JINJA_COMPUTED_ATTRIBUTES,
     TRIGGER_UPDATE_PYTHON_COMPUTED_ATTRIBUTES,
+    VALIDATE_SCHEMA_NUMBER_POOLS,
     WEBHOOK_CONFIGURE_ALL,
     WEBHOOK_CONFIGURE_ONE,
     WEBHOOK_DELETE_AUTOMATION,
infrahub_sdk/client.py CHANGED
@@ -172,11 +172,18 @@ class BaseClient:
         params: dict[str, Any] | None = None,
         delete_unused_nodes: bool = False,
         group_type: str | None = None,
+        group_params: dict[str, Any] | None = None,
+        branch: str | None = None,
     ) -> Self:
         self.mode = InfrahubClientMode.TRACKING
         identifier = identifier or self.identifier or "python-sdk"
         self.set_context_properties(
-            identifier=identifier, params=params, delete_unused_nodes=delete_unused_nodes, group_type=group_type
+            identifier=identifier,
+            params=params,
+            delete_unused_nodes=delete_unused_nodes,
+            group_type=group_type,
+            group_params=group_params,
+            branch=branch,
         )
         return self

@@ -187,14 +194,22 @@
         delete_unused_nodes: bool = True,
         reset: bool = True,
         group_type: str | None = None,
+        group_params: dict[str, Any] | None = None,
+        branch: str | None = None,
     ) -> None:
         if reset:
             if isinstance(self, InfrahubClient):
                 self.group_context = InfrahubGroupContext(self)
             elif isinstance(self, InfrahubClientSync):
                 self.group_context = InfrahubGroupContextSync(self)
+
         self.group_context.set_properties(
-            identifier=identifier, params=params, delete_unused_nodes=delete_unused_nodes, group_type=group_type
+            identifier=identifier,
+            params=params,
+            delete_unused_nodes=delete_unused_nodes,
+            group_type=group_type,
+            group_params=group_params,
+            branch=branch,
         )

     def _graphql_url(
@@ -562,18 +577,27 @@ class InfrahubClient(BaseClient):
         at: Timestamp | None = None,
         branch: str | None = None,
         timeout: int | None = None,
+        partial_match: bool = False,
         **kwargs: Any,
     ) -> int:
         """Return the number of nodes of a given kind."""
-        filters = kwargs
-        schema = await self.schema.get(kind=kind, branch=branch)
+        filters: dict[str, Any] = dict(kwargs)

+        if partial_match:
+            filters["partial_match"] = True
+
+        schema = await self.schema.get(kind=kind, branch=branch)
         branch = branch or self.default_branch
         if at:
             at = Timestamp(at)

+        data: dict[str, Any] = {
+            "count": None,
+            "@filters": filters,
+        }
+
         response = await self.execute_graphql(
-            query=Query(query={schema.kind: {"count": None, "@filters": filters}}).render(),
+            query=Query(query={schema.kind: data}).render(),
             branch_name=branch,
             at=at,
             timeout=timeout,
@@ -801,7 +825,7 @@
         nodes = []
         related_nodes = []
         batch_process = await self.create_batch()
-        count = await self.count(kind=schema.kind, **filters)
+        count = await self.count(kind=schema.kind, partial_match=partial_match, **filters)
         total_pages = (count + pagination_size - 1) // pagination_size

         for page_number in range(1, total_pages + 1):
@@ -1683,18 +1707,27 @@ class InfrahubClientSync(BaseClient):
         at: Timestamp | None = None,
         branch: str | None = None,
         timeout: int | None = None,
+        partial_match: bool = False,
         **kwargs: Any,
     ) -> int:
         """Return the number of nodes of a given kind."""
-        filters = kwargs
-        schema = self.schema.get(kind=kind, branch=branch)
+        filters: dict[str, Any] = dict(kwargs)
+
+        if partial_match:
+            filters["partial_match"] = True

+        schema = self.schema.get(kind=kind, branch=branch)
         branch = branch or self.default_branch
         if at:
             at = Timestamp(at)

+        data: dict[str, Any] = {
+            "count": None,
+            "@filters": filters,
+        }
+
         response = self.execute_graphql(
-            query=Query(query={schema.kind: {"count": None, "@filters": filters}}).render(),
+            query=Query(query={schema.kind: data}).render(),
             branch_name=branch,
             at=at,
             timeout=timeout,
@@ -1957,7 +1990,7 @@
         related_nodes = []
         batch_process = self.create_batch()

-        count = self.count(kind=schema.kind, **filters)
+        count = self.count(kind=schema.kind, partial_match=partial_match, **filters)
         total_pages = (count + pagination_size - 1) // pagination_size

         for page_number in range(1, total_pages + 1):
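
Note: count() now forwards partial_match into the GraphQL @filters block, so paginated filters() calls compute their page count with the same matching mode as the page queries. A hedged usage sketch (the kind and filter name are hypothetical):

from infrahub_sdk import InfrahubClientSync

client = InfrahubClientSync(address="http://localhost:8000")
# Count nodes whose name merely contains "sw"; partial_match=True is injected
# into the query's @filters alongside name__value.
total = client.count(kind="InfraDevice", name__value="sw", partial_match=True)
print(f"{total} matching devices")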
infrahub_sdk/node/__init__.py ADDED
@@ -0,0 +1,39 @@
+from __future__ import annotations
+
+from .constants import (
+    ARTIFACT_DEFINITION_GENERATE_FEATURE_NOT_SUPPORTED_MESSAGE,
+    ARTIFACT_FETCH_FEATURE_NOT_SUPPORTED_MESSAGE,
+    ARTIFACT_GENERATE_FEATURE_NOT_SUPPORTED_MESSAGE,
+    HFID_STR_SEPARATOR,
+    IP_TYPES,
+    PROPERTIES_FLAG,
+    PROPERTIES_OBJECT,
+    SAFE_VALUE,
+)
+from .node import InfrahubNode, InfrahubNodeBase, InfrahubNodeSync
+from .parsers import parse_human_friendly_id
+from .property import NodeProperty
+from .related_node import RelatedNode, RelatedNodeBase, RelatedNodeSync
+from .relationship import RelationshipManager, RelationshipManagerBase, RelationshipManagerSync
+
+__all__ = [
+    "ARTIFACT_DEFINITION_GENERATE_FEATURE_NOT_SUPPORTED_MESSAGE",
+    "ARTIFACT_FETCH_FEATURE_NOT_SUPPORTED_MESSAGE",
+    "ARTIFACT_GENERATE_FEATURE_NOT_SUPPORTED_MESSAGE",
+    "HFID_STR_SEPARATOR",
+    "IP_TYPES",
+    "PROPERTIES_FLAG",
+    "PROPERTIES_OBJECT",
+    "SAFE_VALUE",
+    "InfrahubNode",
+    "InfrahubNodeBase",
+    "InfrahubNodeSync",
+    "NodeProperty",
+    "RelatedNode",
+    "RelatedNodeBase",
+    "RelatedNodeSync",
+    "RelationshipManager",
+    "RelationshipManagerBase",
+    "RelationshipManagerSync",
+    "parse_human_friendly_id",
+]