infrahub-server 1.5.0b1__py3-none-any.whl → 1.5.0b2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (118)
  1. infrahub/api/internal.py +2 -0
  2. infrahub/api/oauth2.py +13 -19
  3. infrahub/api/oidc.py +15 -21
  4. infrahub/api/schema.py +24 -3
  5. infrahub/artifacts/models.py +2 -1
  6. infrahub/auth.py +137 -3
  7. infrahub/cli/__init__.py +2 -0
  8. infrahub/cli/db.py +83 -102
  9. infrahub/cli/dev.py +118 -0
  10. infrahub/cli/tasks.py +46 -0
  11. infrahub/cli/upgrade.py +30 -3
  12. infrahub/computed_attribute/tasks.py +20 -8
  13. infrahub/core/attribute.py +10 -2
  14. infrahub/core/branch/enums.py +1 -1
  15. infrahub/core/branch/models.py +7 -3
  16. infrahub/core/branch/tasks.py +68 -7
  17. infrahub/core/constants/__init__.py +3 -0
  18. infrahub/core/diff/query/artifact.py +1 -0
  19. infrahub/core/diff/query/field_summary.py +1 -0
  20. infrahub/core/graph/__init__.py +1 -1
  21. infrahub/core/initialization.py +5 -2
  22. infrahub/core/migrations/__init__.py +3 -0
  23. infrahub/core/migrations/exceptions.py +4 -0
  24. infrahub/core/migrations/graph/__init__.py +10 -13
  25. infrahub/core/migrations/graph/load_schema_branch.py +21 -0
  26. infrahub/core/migrations/graph/m013_convert_git_password_credential.py +1 -1
  27. infrahub/core/migrations/graph/m040_duplicated_attributes.py +81 -0
  28. infrahub/core/migrations/graph/m041_profile_attrs_in_db.py +145 -0
  29. infrahub/core/migrations/graph/m042_create_hfid_display_label_in_db.py +164 -0
  30. infrahub/core/migrations/graph/m043_backfill_hfid_display_label_in_db.py +866 -0
  31. infrahub/core/migrations/query/__init__.py +7 -8
  32. infrahub/core/migrations/query/attribute_add.py +8 -6
  33. infrahub/core/migrations/query/attribute_remove.py +134 -0
  34. infrahub/core/migrations/runner.py +54 -0
  35. infrahub/core/migrations/schema/attribute_kind_update.py +9 -3
  36. infrahub/core/migrations/schema/attribute_supports_profile.py +90 -0
  37. infrahub/core/migrations/schema/node_attribute_add.py +30 -2
  38. infrahub/core/migrations/schema/node_attribute_remove.py +13 -109
  39. infrahub/core/migrations/schema/node_kind_update.py +2 -1
  40. infrahub/core/migrations/schema/node_remove.py +2 -1
  41. infrahub/core/migrations/schema/placeholder_dummy.py +3 -2
  42. infrahub/core/migrations/shared.py +48 -14
  43. infrahub/core/node/__init__.py +16 -11
  44. infrahub/core/node/create.py +46 -63
  45. infrahub/core/node/lock_utils.py +70 -44
  46. infrahub/core/node/resource_manager/ip_address_pool.py +2 -1
  47. infrahub/core/node/resource_manager/ip_prefix_pool.py +2 -1
  48. infrahub/core/node/resource_manager/number_pool.py +2 -1
  49. infrahub/core/query/attribute.py +55 -0
  50. infrahub/core/query/ipam.py +1 -0
  51. infrahub/core/query/node.py +9 -3
  52. infrahub/core/query/relationship.py +1 -0
  53. infrahub/core/schema/__init__.py +56 -0
  54. infrahub/core/schema/attribute_schema.py +4 -0
  55. infrahub/core/schema/definitions/internal.py +2 -2
  56. infrahub/core/schema/generated/attribute_schema.py +2 -2
  57. infrahub/core/schema/manager.py +22 -1
  58. infrahub/core/schema/schema_branch.py +180 -22
  59. infrahub/database/graph.py +21 -0
  60. infrahub/display_labels/tasks.py +13 -7
  61. infrahub/events/branch_action.py +27 -1
  62. infrahub/generators/tasks.py +3 -7
  63. infrahub/git/base.py +4 -1
  64. infrahub/git/integrator.py +1 -1
  65. infrahub/git/models.py +2 -1
  66. infrahub/git/repository.py +22 -5
  67. infrahub/git/tasks.py +66 -10
  68. infrahub/git/utils.py +123 -1
  69. infrahub/graphql/api/endpoints.py +14 -4
  70. infrahub/graphql/manager.py +4 -9
  71. infrahub/graphql/mutations/convert_object_type.py +11 -1
  72. infrahub/graphql/mutations/display_label.py +17 -10
  73. infrahub/graphql/mutations/hfid.py +17 -10
  74. infrahub/graphql/mutations/ipam.py +54 -35
  75. infrahub/graphql/mutations/main.py +27 -28
  76. infrahub/graphql/schema_sort.py +170 -0
  77. infrahub/graphql/types/branch.py +4 -1
  78. infrahub/graphql/types/enums.py +3 -0
  79. infrahub/hfid/tasks.py +13 -7
  80. infrahub/lock.py +52 -12
  81. infrahub/message_bus/types.py +2 -1
  82. infrahub/permissions/constants.py +2 -0
  83. infrahub/proposed_change/tasks.py +25 -16
  84. infrahub/server.py +6 -2
  85. infrahub/services/__init__.py +2 -2
  86. infrahub/services/adapters/http/__init__.py +5 -0
  87. infrahub/services/adapters/workflow/worker.py +14 -3
  88. infrahub/task_manager/event.py +5 -0
  89. infrahub/task_manager/models.py +7 -0
  90. infrahub/task_manager/task.py +73 -0
  91. infrahub/trigger/setup.py +13 -4
  92. infrahub/trigger/tasks.py +3 -0
  93. infrahub/workers/dependencies.py +10 -1
  94. infrahub/workers/infrahub_async.py +10 -2
  95. infrahub/workflows/catalogue.py +8 -0
  96. infrahub/workflows/initialization.py +5 -0
  97. infrahub/workflows/utils.py +2 -1
  98. infrahub_sdk/client.py +13 -10
  99. infrahub_sdk/config.py +29 -2
  100. infrahub_sdk/ctl/schema.py +22 -7
  101. infrahub_sdk/schema/__init__.py +32 -4
  102. infrahub_sdk/spec/models.py +7 -0
  103. infrahub_sdk/spec/object.py +37 -102
  104. infrahub_sdk/spec/processors/__init__.py +0 -0
  105. infrahub_sdk/spec/processors/data_processor.py +10 -0
  106. infrahub_sdk/spec/processors/factory.py +34 -0
  107. infrahub_sdk/spec/processors/range_expand_processor.py +56 -0
  108. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.0b2.dist-info}/METADATA +3 -1
  109. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.0b2.dist-info}/RECORD +115 -101
  110. infrahub_testcontainers/container.py +114 -2
  111. infrahub_testcontainers/docker-compose-cluster.test.yml +5 -0
  112. infrahub_testcontainers/docker-compose.test.yml +5 -0
  113. infrahub/core/migrations/graph/m040_profile_attrs_in_db.py +0 -166
  114. infrahub/core/migrations/graph/m041_create_hfid_display_label_in_db.py +0 -97
  115. infrahub/core/migrations/graph/m042_backfill_hfid_display_label_in_db.py +0 -86
  116. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.0b2.dist-info}/LICENSE.txt +0 -0
  117. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.0b2.dist-info}/WHEEL +0 -0
  118. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.0b2.dist-info}/entry_points.txt +0 -0
@@ -8,7 +8,7 @@ from pathlib import Path
 from typing import TYPE_CHECKING

 import pytest
-from infrahub_sdk.exceptions import ModuleImportError
+from infrahub_sdk.exceptions import ModuleImportError, NodeNotFoundError, URLNotFoundError
 from infrahub_sdk.node import InfrahubNode
 from infrahub_sdk.protocols import (
     CoreArtifactValidator,
@@ -44,7 +44,7 @@ from infrahub.core.diff.model.diff import DiffElementType, SchemaConflict
 from infrahub.core.diff.model.path import NodeDiffFieldSummary
 from infrahub.core.integrity.object_conflict.conflict_recorder import ObjectConflictValidatorRecorder
 from infrahub.core.manager import NodeManager
-from infrahub.core.protocols import CoreDataCheck, CoreValidator
+from infrahub.core.protocols import CoreArtifactDefinition, CoreDataCheck, CoreValidator
 from infrahub.core.protocols import CoreProposedChange as InternalCoreProposedChange
 from infrahub.core.timestamp import Timestamp
 from infrahub.core.validators.checks_runner import run_checks_and_update_validator
@@ -58,6 +58,7 @@ from infrahub.generators.models import ProposedChangeGeneratorDefinition
 from infrahub.git.base import extract_repo_file_information
 from infrahub.git.models import TriggerRepositoryInternalChecks, TriggerRepositoryUserChecks
 from infrahub.git.repository import InfrahubRepository, get_initialized_repo
+from infrahub.git.utils import fetch_artifact_definition_targets, fetch_proposed_change_generator_definition_targets
 from infrahub.log import get_logger
 from infrahub.message_bus.types import (
     ProposedChangeArtifactDefinition,
@@ -528,7 +529,11 @@ async def run_proposed_change_user_tests(model: RequestProposedChangeUserTests)
     log = get_run_logger()
     client = get_client()

-    proposed_change = await client.get(kind=InfrahubKind.PROPOSEDCHANGE, id=model.proposed_change)
+    try:
+        proposed_change = await client.get(kind=CoreProposedChange, id=model.proposed_change)
+    except NodeNotFoundError:
+        log.warning(f"Proposed change ({model.proposed_change}) not found, skipping user tests execution")
+        return

     def _execute(
         directory: Path, repository: ProposedChangeRepository, proposed_change: InfrahubNode
@@ -616,7 +621,7 @@ async def validate_artifacts_generation(model: RequestArtifactDefinitionCheck, c
     client = get_client()

     artifact_definition = await client.get(
-        kind=InfrahubKind.ARTIFACTDEFINITION,
+        kind=CoreArtifactDefinition,
         id=model.artifact_definition.definition_id,
         branch=model.source_branch,
     )
@@ -656,9 +661,9 @@ async def validate_artifacts_generation(model: RequestArtifactDefinitionCheck, c
         branch=model.source_branch,
     )

-    await artifact_definition.targets.fetch()
-    group = artifact_definition.targets.peer
-    await group.members.fetch()
+    group = await fetch_artifact_definition_targets(
+        client=client, branch=model.source_branch, definition=artifact_definition
+    )

     artifacts_by_member = {}
     for artifact in existing_artifacts:
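
The new helper centralizes the target-group lookup that was previously inlined at both call sites in this file. Its actual implementation lives in infrahub/git/utils.py (+123 lines, not shown in this diff); a minimal sketch consistent with the call site and the three calls it replaces might look like the following — how the branch argument is used internally is an assumption:

from infrahub_sdk import InfrahubClient
from infrahub_sdk.node import InfrahubNode


async def fetch_artifact_definition_targets(
    client: InfrahubClient, branch: str, definition: InfrahubNode
) -> InfrahubNode:
    """Resolve the artifact definition's target group and prefetch its members."""
    # Equivalent of the removed inline calls; the real helper may batch these
    # queries on the given branch (assumption based on the signature).
    await definition.targets.fetch()
    group = definition.targets.peer
    await group.members.fetch()
    return group
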
@@ -695,6 +700,7 @@ async def validate_artifacts_generation(model: RequestArtifactDefinitionCheck, c
                 repository_kind=repository.kind,
                 branch_name=model.source_branch,
                 query=model.artifact_definition.query_name,
+                query_id=model.artifact_definition.query_id,
                 variables=await member.extract(params=artifact_definition.parameters.value),
                 target_id=member.id,
                 target_kind=member.get_kind(),
@@ -925,14 +931,9 @@ async def request_generator_definition_check(model: RequestGeneratorDefinitionCh
         branch=model.source_branch,
     )

-    group = await client.get(
-        kind=InfrahubKind.GENERICGROUP,
-        prefetch_relationships=True,
-        populate_store=True,
-        id=model.generator_definition.group_id,
-        branch=model.source_branch,
+    group = await fetch_proposed_change_generator_definition_targets(
+        client=client, branch=model.source_branch, definition=model.generator_definition
     )
-    await group.members.fetch()

     instance_by_member = {}
     for instance in existing_instances:
@@ -1254,6 +1255,7 @@ query GatherArtifactDefinitions {
                     }
                     query {
                         node {
+                            id
                             models {
                                 value
                             }
@@ -1475,6 +1477,7 @@ def _parse_artifact_definitions(definitions: list[dict]) -> list[ProposedChangeA
             content_type=definition["node"]["content_type"]["value"],
             timeout=definition["node"]["transformation"]["node"]["timeout"]["value"],
             query_name=definition["node"]["transformation"]["node"]["query"]["node"]["name"]["value"],
+            query_id=definition["node"]["transformation"]["node"]["query"]["node"]["id"],
             query_models=definition["node"]["transformation"]["node"]["query"]["node"]["models"]["value"] or [],
             repository_id=definition["node"]["transformation"]["node"]["repository"]["node"]["id"],
             transform_kind=definition["node"]["transformation"]["node"]["__typename"],
@@ -1499,8 +1502,14 @@ async def _get_proposed_change_repositories(
     destination_all = await client.execute_graphql(
         query=DESTINATION_ALLREPOSITORIES, branch_name=model.destination_branch
     )
-    source_managed = await client.execute_graphql(query=SOURCE_REPOSITORIES, branch_name=model.source_branch)
-    source_readonly = await client.execute_graphql(query=SOURCE_READONLY_REPOSITORIES, branch_name=model.source_branch)
+    try:
+        source_managed = await client.execute_graphql(query=SOURCE_REPOSITORIES, branch_name=model.source_branch)
+        source_readonly = await client.execute_graphql(
+            query=SOURCE_READONLY_REPOSITORIES, branch_name=model.source_branch
+        )
+    except URLNotFoundError:
+        # If the URL is not found it means that the source branch has been deleted after the proposed change was created
+        return []

     destination_all = destination_all[InfrahubKind.GENERICREPOSITORY]["edges"]
     source_all = (
infrahub/server.py CHANGED
@@ -24,6 +24,7 @@ from infrahub.api.exception_handlers import generic_api_exception_handler
 from infrahub.components import ComponentType
 from infrahub.constants.environment import INSTALLATION_TYPE
 from infrahub.core.initialization import initialization
+from infrahub.database.graph import validate_graph_version
 from infrahub.dependencies.registry import build_component_registry
 from infrahub.exceptions import Error, ValidationError
 from infrahub.graphql.api.endpoints import router as graphql_router
@@ -83,10 +84,13 @@ async def app_initialization(application: FastAPI, enable_scheduler: bool = True
     initialize_lock(service=service)
     # We must initialize DB after initialize lock and initialize lock depends on cache initialization
     async with application.state.db.start_session() as db:
-        await initialization(db=db, add_database_indexes=True)
+        is_initial_setup = await initialization(db=db, add_database_indexes=True)
+
+    async with database.start_session() as dbs:
+        await validate_graph_version(db=dbs)

     # Initialize the workflow after the registry has been setup
-    await service.initialize_workflow()
+    await service.initialize_workflow(is_initial_setup=is_initial_setup)

     application.state.service = service
     application.state.response_delay = config.SETTINGS.miscellaneous.response_delay
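
validate_graph_version comes from the new infrahub/database/graph.py module (+21 lines), whose body is not shown in this diff. A plausible sketch of what such a startup check does, based on its call sites here and in the worker startup further down — the helper name and the exact failure handling are assumptions:

import sys

from infrahub.core.graph import GRAPH_VERSION  # bumped in this release
from infrahub.database import InfrahubDatabase


async def validate_graph_version(db: InfrahubDatabase) -> None:
    # Hypothetical: read the graph version recorded in the database and refuse
    # to start against a database the code does not understand.
    stored_version = await get_stored_graph_version(db=db)  # assumed helper
    if stored_version != GRAPH_VERSION:
        sys.exit(
            f"Database graph version {stored_version} does not match expected "
            f"version {GRAPH_VERSION}; an upgrade (see infrahub/cli/upgrade.py, "
            "also touched in this release) is presumably required first."
        )
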
@@ -112,7 +112,7 @@ class InfrahubServices:

         return service

-    async def initialize_workflow(self) -> None:
+    async def initialize_workflow(self, is_initial_setup: bool = False) -> None:
         if self.workflow is not None and isinstance(self.workflow, WorkflowWorkerExecution):
             assert self.component is not None
             # Ideally `WorkflowWorkerExecution.initialize` would be directly part of WorkflowWorkerExecution
@@ -120,7 +120,7 @@ class InfrahubServices:
             # after workflow instantiation.
             await self.component.refresh_heartbeat()
             is_primary = await self.component.is_primary_gunicorn_worker()
-            await self.workflow.initialize(component_is_primary_server=is_primary)
+            await self.workflow.initialize(component_is_primary_server=is_primary, is_initial_setup=is_initial_setup)

     @property
     def component(self) -> InfrahubComponent:
@@ -3,10 +3,15 @@ from __future__ import annotations
 from typing import TYPE_CHECKING, Any

 if TYPE_CHECKING:
+    import ssl
+
     import httpx


 class InfrahubHTTP:
+    def verify_tls(self, verify: bool | None = None) -> bool | ssl.SSLContext:
+        raise NotImplementedError()
+
     async def get(
         self,
         url: str,
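
verify_tls is only declared on the base adapter here; the concrete implementation in the HttpxAdapter referenced below is not part of this diff. A minimal sketch of an override, assuming the adapter caches an ssl.SSLContext built from a configured CA bundle (the attribute and setting names are assumptions):

import ssl

from infrahub import config


class HttpxAdapter(InfrahubHTTP):
    _ssl_context: ssl.SSLContext | None = None

    def verify_tls(self, verify: bool | None = None) -> bool | ssl.SSLContext:
        if verify is False:
            return False  # TLS verification explicitly disabled by the caller
        if self._ssl_context is None:
            # Building an SSLContext is expensive; do it once and reuse it for
            # every client the adapter hands out.
            self._ssl_context = ssl.create_default_context(
                cafile=config.SETTINGS.http.tls_ca_bundle  # assumed setting name
            )
        return self._ssl_context
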
@@ -3,10 +3,12 @@ from __future__ import annotations
 from typing import TYPE_CHECKING, Any, overload

 from prefect.client.schemas.objects import StateType
+from prefect.context import AsyncClientContext
 from prefect.deployments import run_deployment

+from infrahub.services.adapters.http.httpx import HttpxAdapter
 from infrahub.workers.utils import inject_context_parameter
-from infrahub.workflows.initialization import setup_task_manager
+from infrahub.workflows.initialization import setup_task_manager, setup_task_manager_identifiers
 from infrahub.workflows.models import WorkflowInfo

 from . import InfrahubWorkflow, Return
@@ -19,11 +21,19 @@ if TYPE_CHECKING:


 class WorkflowWorkerExecution(InfrahubWorkflow):
+    # This is required to grab a cached SSLContext from the HttpAdapter.
+    # We cannot use the get_http() dependency since it introduces a circular dependency.
+    # We could remove this later on by introducing a cached SSLContext outside of this adapter.
+    _http_adapter = HttpxAdapter()
+
     @staticmethod
-    async def initialize(component_is_primary_server: bool) -> None:
+    async def initialize(component_is_primary_server: bool, is_initial_setup: bool = False) -> None:
         if component_is_primary_server:
             await setup_task_manager()

+        if is_initial_setup:
+            await setup_task_manager_identifiers()
+
     @overload
     async def execute_workflow(
         self,
@@ -79,5 +89,6 @@ class WorkflowWorkerExecution(InfrahubWorkflow):
         parameters = dict(parameters) if parameters is not None else {}
         inject_context_parameter(func=flow_func, parameters=parameters, context=context)

-        flow_run = await run_deployment(name=workflow.full_name, timeout=0, parameters=parameters or {}, tags=tags)  # type: ignore[return-value, misc]
+        async with AsyncClientContext(httpx_settings={"verify": self._http_adapter.verify_tls()}):
+            flow_run = await run_deployment(name=workflow.full_name, timeout=0, parameters=parameters or {}, tags=tags)  # type: ignore[return-value, misc]
         return WorkflowInfo.from_flow(flow_run=flow_run)
@@ -160,6 +160,9 @@ class PrefectEventData(PrefectEventModel):
     def _return_branch_rebased(self) -> dict[str, Any]:
         return {"rebased_branch": self._get_branch_name_from_resource()}

+    def _return_branch_migrated(self) -> dict[str, Any]:
+        return {"migrated_branch": self._get_branch_name_from_resource()}
+
     def _return_group_event(self) -> dict[str, Any]:
         members = []
         ancestors = []
@@ -228,6 +231,8 @@ class PrefectEventData(PrefectEventModel):
             case "infrahub.branch.deleted":
                 event_specifics = self._return_branch_deleted()
             case "infrahub.branch.merged":
                 event_specifics = self._return_branch_merged()
+            case "infrahub.branch.migrated":
+                event_specifics = self._return_branch_migrated()
             case "infrahub.branch.rebased":
                 event_specifics = self._return_branch_rebased()
             case "infrahub.group.member_added" | "infrahub.group.member_removed":
@@ -141,6 +141,13 @@ class InfrahubEventFilter(EventFilter):
         if branches:
             self.resource = EventResourceFilter(labels=ResourceSpecification({"infrahub.branch.name": branches}))

+        if branch_migrated := event_type_filter.get("branch_migrated"):
+            branches = branch_migrated.get("branches") or []
+            if "infrahub.branch.created" not in event_type:
+                event_type.append("infrahub.branch.migrated")
+            if branches:
+                self.resource = EventResourceFilter(labels=ResourceSpecification({"infrahub.branch.name": branches}))
+
         if branch_rebased := event_type_filter.get("branch_rebased"):
             branches = branch_rebased.get("branches") or []
             if "infrahub.branch.created" not in event_type:
@@ -1,7 +1,10 @@
+import asyncio
 import uuid
+from datetime import datetime, timedelta, timezone
 from typing import Any
 from uuid import UUID

+from prefect import State
 from prefect.client.orchestration import PrefectClient, get_client
 from prefect.client.schemas.filters import (
     ArtifactFilter,
@@ -12,6 +15,7 @@ from prefect.client.schemas.filters import (
     FlowRunFilter,
     FlowRunFilterId,
     FlowRunFilterName,
+    FlowRunFilterStartTime,
     FlowRunFilterState,
     FlowRunFilterStateType,
     FlowRunFilterTags,
@@ -311,3 +315,72 @@ class PrefectTask:
         )

         return {"count": count or 0, "edges": nodes}
+
+    @classmethod
+    async def delete_flow_runs(
+        cls,
+        states: list[StateType] = [StateType.COMPLETED, StateType.FAILED, StateType.CANCELLED],  # noqa: B006
+        delete: bool = True,
+        days_to_keep: int = 2,
+        batch_size: int = 100,
+    ) -> None:
+        """Delete flow runs in the specified states and older than specified days."""
+
+        logger = get_logger()
+
+        async with get_client(sync_client=False) as client:
+            cutoff = datetime.now(timezone.utc) - timedelta(days=days_to_keep)
+
+            flow_run_filter = FlowRunFilter(
+                start_time=FlowRunFilterStartTime(before_=cutoff),  # type: ignore[arg-type]
+                state=FlowRunFilterState(type=FlowRunFilterStateType(any_=states)),
+            )
+
+            # Get flow runs to delete
+            flow_runs = await client.read_flow_runs(flow_run_filter=flow_run_filter, limit=batch_size)
+
+            deleted_total = 0
+
+            while True:
+                batch_deleted = 0
+                failed_deletes = []
+
+                # Delete each flow run through the API
+                for flow_run in flow_runs:
+                    try:
+                        if delete:
+                            await client.delete_flow_run(flow_run_id=flow_run.id)
+                        else:
+                            await client.set_flow_run_state(
+                                flow_run_id=flow_run.id,
+                                state=State(type=StateType.CRASHED),
+                                force=True,
+                            )
+                        deleted_total += 1
+                        batch_deleted += 1
+                    except Exception as e:
+                        logger.warning(f"Failed to delete flow run {flow_run.id}: {e}")
+                        failed_deletes.append(flow_run.id)
+
+                    # Rate limiting
+                    if batch_deleted % 10 == 0:
+                        await asyncio.sleep(0.5)
+
+                logger.info(f"Delete {batch_deleted}/{len(flow_runs)} flow runs (total: {deleted_total})")
+
+                # Get next batch
+                previous_flow_run_ids = [fr.id for fr in flow_runs]
+                flow_runs = await client.read_flow_runs(flow_run_filter=flow_run_filter, limit=batch_size)
+
+                if not flow_runs:
+                    logger.info("No more flow runs to delete")
+                    break
+
+                if previous_flow_run_ids == [fr.id for fr in flow_runs]:
+                    logger.info("Found same flow runs to delete, aborting")
+                    break
+
+                # Delay between batches to avoid overwhelming the API
+                await asyncio.sleep(1.0)
+
+        logger.info(f"Retention complete. Total deleted tasks: {deleted_total}")
infrahub/trigger/setup.py CHANGED
@@ -6,6 +6,7 @@ from prefect.cache_policies import NONE
 from prefect.client.orchestration import PrefectClient, get_client
 from prefect.client.schemas.filters import DeploymentFilter, DeploymentFilterName
 from prefect.events.schemas.automations import Automation
+from prefect.exceptions import PrefectHTTPStatusError

 from infrahub import lock
 from infrahub.database import InfrahubDatabase
@@ -51,7 +52,7 @@ async def setup_triggers_specific(
     )  # type: ignore[misc]


-@task(name="trigger-setup", task_run_name="Setup triggers", cache_policy=NONE)  # type: ignore[arg-type]
+@task(name="trigger-setup", task_run_name="Setup triggers", cache_policy=NONE)
 async def setup_triggers(
     client: PrefectClient,
     triggers: list[TriggerDefinition],
@@ -83,7 +84,9 @@ async def setup_triggers(
     existing_automations: dict[str, Automation] = {}
     if trigger_type:
         existing_automations = {
-            item.name: item for item in await client.read_automations() if item.name.startswith(trigger_type.value)
+            item.name: item
+            for item in await client.read_automations()
+            if item.name.startswith(f"{trigger_type.value}::")
         }
     else:
         existing_automations = {item.name: item for item in await client.read_automations()}
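
The `::` suffix matters because a bare prefix match can bleed across trigger types whose values share a prefix, which the automation names appear to avoid by using a `<trigger_type>::<name>` scheme. A minimal illustration (the names below are made up):

names = ["webhook::alpha", "webhook_custom::beta"]
# Old behavior: both names match the "webhook" trigger type.
assert [n for n in names if n.startswith("webhook")] == names
# New behavior: only automations of that exact type match.
assert [n for n in names if n.startswith("webhook::")] == ["webhook::alpha"]
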
@@ -133,8 +136,14 @@ async def setup_triggers(
             continue

         report.deleted.append(existing_automation)
-        await client.delete_automation(automation_id=existing_automation.id)
-        log.info(f"{item_to_delete} Deleted")
+        try:
+            await client.delete_automation(automation_id=existing_automation.id)
+            log.info(f"{item_to_delete} Deleted")
+        except PrefectHTTPStatusError as exc:
+            if exc.response.status_code == 404:
+                log.info(f"{item_to_delete} was already deleted")
+            else:
+                raise

     if trigger_type:
         log.info(
infrahub/trigger/tasks.py CHANGED
@@ -7,6 +7,7 @@ from infrahub.computed_attribute.gather import (
     gather_trigger_computed_attribute_python,
 )
 from infrahub.display_labels.gather import gather_trigger_display_labels_jinja2
+from infrahub.hfid.gather import gather_trigger_hfid
 from infrahub.trigger.catalogue import builtin_triggers
 from infrahub.webhook.gather import gather_trigger_webhook
 from infrahub.workers.dependencies import get_database
@@ -20,6 +21,7 @@ async def trigger_configure_all() -> None:
     async with database.start_session() as db:
         webhook_trigger = await gather_trigger_webhook(db=db)
         display_label_triggers = await gather_trigger_display_labels_jinja2()
+        human_friendly_id_triggers = await gather_trigger_hfid()
         computed_attribute_j2_triggers = await gather_trigger_computed_attribute_jinja2()
         (
             computed_attribute_python_triggers,
@@ -31,6 +33,7 @@ async def trigger_configure_all() -> None:
             + computed_attribute_python_triggers
             + computed_attribute_python_query_triggers
             + display_label_triggers
+            + human_friendly_id_triggers
             + builtin_triggers
             + webhook_trigger
             + action_rules
@@ -7,6 +7,7 @@ from infrahub_sdk.config import Config
 from infrahub import config
 from infrahub.components import ComponentType
 from infrahub.constants.environment import INSTALLATION_TYPE
+from infrahub.core.registry import registry
 from infrahub.database import InfrahubDatabase, get_db
 from infrahub.services.adapters.cache import InfrahubCache
 from infrahub.services.adapters.event import InfrahubEventService
@@ -34,7 +35,15 @@ def get_component_type() -> ComponentType:


 def build_client() -> InfrahubClient:
-    return InfrahubClient(config=Config(address=config.SETTINGS.main.internal_address, retry_on_failure=True))
+    client_config = Config(address=config.SETTINGS.main.internal_address, retry_on_failure=True)
+    client_config.set_ssl_context(context=get_http().verify_tls())
+    client = InfrahubClient(config=client_config)
+    # Populate client schema cache using our internal schema cache
+    if registry.schema:
+        for branch in registry.schema.get_branches():
+            client.schema.set_cache(schema=registry.schema.get_sdk_schema_branch(name=branch), branch=branch)
+
+    return client


 @inject
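
Prepopulating the SDK schema cache from the server-side registry means internal clients can skip the initial schema fetch. An illustrative follow-up call, from inside an async context and assuming the kind exists on the branch:

client = build_client()
# Served from the prepopulated cache rather than an extra API round-trip:
schema = await client.schema.get(kind="CoreGenericRepository", branch="main")
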
@@ -8,6 +8,7 @@ from infrahub_sdk import Config, InfrahubClient
 from infrahub_sdk.exceptions import Error as SdkError
 from prefect import settings as prefect_settings
 from prefect.client.schemas.objects import FlowRun
+from prefect.context import AsyncClientContext
 from prefect.flow_engine import run_flow_async
 from prefect.logging.handlers import APILogHandler
 from prefect.workers.base import BaseJobConfiguration, BaseVariables, BaseWorker, BaseWorkerResult
@@ -18,6 +19,7 @@ from infrahub import config
 from infrahub.components import ComponentType
 from infrahub.core import registry
 from infrahub.core.initialization import initialization
+from infrahub.database.graph import validate_graph_version
 from infrahub.dependencies.registry import build_component_registry
 from infrahub.git import initialize_repositories_directory
 from infrahub.lock import initialize_lock
@@ -27,6 +29,7 @@ from infrahub.workers.dependencies import (
     get_cache,
     get_component,
     get_database,
+    get_http,
     get_message_bus,
     get_workflow,
     set_component_type,
@@ -129,6 +132,9 @@ class InfrahubWorkerAsync(BaseWorker):

         await self.service.component.refresh_schema_hash()

+        async with self.service.database.start_session() as dbs:
+            await validate_graph_version(db=dbs)
+
         initialize_repositories_directory()
         build_component_registry()
         await self.service.scheduler.start_schedule()
@@ -138,7 +144,7 @@ class InfrahubWorkerAsync(BaseWorker):
         self,
         flow_run: FlowRun,
         configuration: BaseJobConfiguration,
-        task_status: TaskStatus | None = None,
+        task_status: TaskStatus[int] | None = None,
     ) -> BaseWorkerResult:
         flow_run_logger = self.get_flow_run_logger(flow_run)

@@ -154,7 +160,9 @@ class InfrahubWorkerAsync(BaseWorker):
         if task_status:
             task_status.started(True)

-        await run_flow_async(flow=flow_func, flow_run=flow_run, parameters=params, return_type="state")
+        async with AsyncClientContext(httpx_settings={"verify": get_http().verify_tls()}) as ctx:
+            ctx._httpx_settings = None  # Hack to make all child task/flow runs use the same client
+            await run_flow_async(flow=flow_func, flow_run=flow_run, parameters=params, return_type="state")

         return InfrahubWorkerAsyncResult(status_code=0, identifier=str(flow_run.id))

@@ -230,6 +230,13 @@ BRANCH_REBASE = WorkflowDefinition(
     function="rebase_branch",
     tags=[WorkflowTag.DATABASE_CHANGE],
 )
+BRANCH_MIGRATE = WorkflowDefinition(
+    name="branch-migrate",
+    type=WorkflowType.CORE,
+    module="infrahub.core.branch.tasks",
+    function="migrate_branch",
+    tags=[WorkflowTag.DATABASE_CHANGE],
+)

 BRANCH_CREATE = WorkflowDefinition(
     name="create-branch",
@@ -641,6 +648,7 @@ WORKFLOWS = [
     BRANCH_MERGED,
     BRANCH_MERGE_MUTATION,
     BRANCH_MERGE_POST_PROCESS,
+    BRANCH_MIGRATE,
     BRANCH_REBASE,
     BRANCH_VALIDATE,
     CLEAN_UP_DEADLOCKS,
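
A sketch of how the new workflow could be dispatched through the worker execution layer shown earlier; execute_workflow follows the pattern of the other Infrahub workflows, but the parameter name passed to migrate_branch is an assumption:

from infrahub.workflows.catalogue import BRANCH_MIGRATE

await service.workflow.execute_workflow(
    workflow=BRANCH_MIGRATE,
    parameters={"branch": "my-branch"},  # assumed parameter name
)
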
@@ -76,6 +76,11 @@ async def setup_task_manager() -> None:
         await setup_triggers(
             client=client, triggers=builtin_triggers, trigger_type=TriggerType.BUILTIN, force_update=True
         )
+
+
+@flow(name="task-manager-identifiers", flow_run_name="Setup Task Manager Display Labels and HFID")
+async def setup_task_manager_identifiers() -> None:
+    async with get_client(sync_client=False) as client:
         display_label_triggers = await gather_trigger_display_labels_jinja2()
         await setup_triggers(
             client=client,
@@ -9,6 +9,7 @@ from prefect.runtime import flow_run
 from infrahub.core.constants import GLOBAL_BRANCH_NAME
 from infrahub.core.registry import registry
 from infrahub.tasks.registry import refresh_branches
+from infrahub.workers.dependencies import get_http

 from .constants import TAG_NAMESPACE, WorkflowTag

@@ -26,7 +27,7 @@ async def add_tags(
     namespace: bool = True,
     db_change: bool = False,
 ) -> None:
-    client = get_client(sync_client=False)
+    client = get_client(httpx_settings={"verify": get_http().verify_tls()}, sync_client=False)
     current_flow_run_id = flow_run.id
     current_tags: list[str] = flow_run.tags
     branch_tags = (
infrahub_sdk/client.py CHANGED
@@ -94,7 +94,9 @@ class ProcessRelationsNodeSync(TypedDict):
     related_nodes: list[InfrahubNodeSync]


-def handle_relogin(func: Callable[..., Coroutine[Any, Any, httpx.Response]]):  # type: ignore[no-untyped-def]
+def handle_relogin(
+    func: Callable[..., Coroutine[Any, Any, httpx.Response]],
+) -> Callable[..., Coroutine[Any, Any, httpx.Response]]:
     @wraps(func)
     async def wrapper(client: InfrahubClient, *args: Any, **kwargs: Any) -> httpx.Response:
         response = await func(client, *args, **kwargs)
@@ -108,7 +110,7 @@ def handle_relogin(func: Callable[..., Coroutine[Any, Any, httpx.Response]]): #
     return wrapper


-def handle_relogin_sync(func: Callable[..., httpx.Response]):  # type: ignore[no-untyped-def]
+def handle_relogin_sync(func: Callable[..., httpx.Response]) -> Callable[..., httpx.Response]:
     @wraps(func)
     def wrapper(client: InfrahubClientSync, *args: Any, **kwargs: Any) -> httpx.Response:
         response = func(client, *args, **kwargs)
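
The explicit return annotations replace the `# type: ignore[no-untyped-def]` escape hatches, so decorated methods keep a checkable type. Illustrative use on a hypothetical client method (the method name is made up; the retry-on-relogin behavior is implied by the decorator name):

class InfrahubClient(BaseClient):
    @handle_relogin
    async def _request(self, url: str, **kwargs: Any) -> httpx.Response:
        # The wrapper inspects the response and can re-login and retry;
        # with the new annotation, type checkers still see an awaitable
        # returning httpx.Response here.
        ...
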
@@ -170,6 +172,7 @@ class BaseClient:
         self.group_context: InfrahubGroupContext | InfrahubGroupContextSync
         self._initialize()
         self._request_context: RequestContext | None = None
+        _ = self.config.tls_context  # Early load of the TLS context to catch errors

     def _initialize(self) -> None:
         """Sets the properties for each version of the client"""
@@ -574,7 +577,7 @@ class InfrahubClient(BaseClient):
             schema_kind (str): The kind of schema being queried.
             branch (str): The branch name.
             prefetch_relationships (bool): Flag to indicate whether to prefetch relationship data.
-            timeout (int, optional): Overrides default timeout used when querying the graphql API. Specified in seconds.
+            timeout (int, optional): Overrides default timeout used when querying the GraphQL API. Specified in seconds.

         Returns:
             ProcessRelationsNodeSync: A TypedDict containing two lists:
@@ -694,7 +697,7 @@ class InfrahubClient(BaseClient):
             at (Timestamp, optional): Time of the query. Defaults to Now.
             branch (str, optional): Name of the branch to query from. Defaults to default_branch.
             populate_store (bool, optional): Flag to indicate whether to populate the store with the retrieved nodes.
-            timeout (int, optional): Overrides default timeout used when querying the graphql API. Specified in seconds.
+            timeout (int, optional): Overrides default timeout used when querying the GraphQL API. Specified in seconds.
             offset (int, optional): The offset for pagination.
             limit (int, optional): The limit for pagination.
             include (list[str], optional): List of attributes or relationships to include in the query.
@@ -791,7 +794,7 @@ class InfrahubClient(BaseClient):
             kind (str): kind of the nodes to query
             at (Timestamp, optional): Time of the query. Defaults to Now.
             branch (str, optional): Name of the branch to query from. Defaults to default_branch.
-            timeout (int, optional): Overrides default timeout used when querying the graphql API. Specified in seconds.
+            timeout (int, optional): Overrides default timeout used when querying the GraphQL API. Specified in seconds.
             populate_store (bool, optional): Flag to indicate whether to populate the store with the retrieved nodes.
             offset (int, optional): The offset for pagination.
             limit (int, optional): The limit for pagination.
@@ -1073,7 +1076,7 @@ class InfrahubClient(BaseClient):

         async with httpx.AsyncClient(
             **proxy_config,
-            verify=self.config.tls_ca_file if self.config.tls_ca_file else not self.config.tls_insecure,
+            verify=self.config.tls_context,
         ) as client:
             try:
                 response = await client.request(
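
config.tls_context is new in infrahub_sdk/config.py (+29 -2, not shown in this diff). Based on the per-call expression it replaces and the set_ssl_context call in build_client above, a plausible sketch of the relevant additions — the private attribute name and caching behavior are assumptions:

import ssl


class Config:  # sketch of the new TLS-related members only
    tls_ca_file: str | None = None
    tls_insecure: bool = False
    _tls_context: bool | ssl.SSLContext | None = None

    def set_ssl_context(self, context: bool | ssl.SSLContext) -> None:
        # Allow callers (e.g. build_client) to inject a shared, cached context.
        self._tls_context = context

    @property
    def tls_context(self) -> bool | ssl.SSLContext:
        if self._tls_context is None:
            # Reproduce the old per-call logic once, then cache the result:
            # a CA file wins, otherwise fall back to the tls_insecure toggle.
            if self.tls_ca_file:
                self._tls_context = ssl.create_default_context(cafile=self.tls_ca_file)
            else:
                self._tls_context = not self.tls_insecure
        return self._tls_context
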
@@ -1945,7 +1948,7 @@ class InfrahubClientSync(BaseClient):
             kind (str): kind of the nodes to query
             at (Timestamp, optional): Time of the query. Defaults to Now.
             branch (str, optional): Name of the branch to query from. Defaults to default_branch.
-            timeout (int, optional): Overrides default timeout used when querying the graphql API. Specified in seconds.
+            timeout (int, optional): Overrides default timeout used when querying the GraphQL API. Specified in seconds.
             populate_store (bool, optional): Flag to indicate whether to populate the store with the retrieved nodes.
             offset (int, optional): The offset for pagination.
             limit (int, optional): The limit for pagination.
@@ -1992,7 +1995,7 @@ class InfrahubClientSync(BaseClient):
             schema_kind (str): The kind of schema being queried.
             branch (str): The branch name.
             prefetch_relationships (bool): Flag to indicate whether to prefetch relationship data.
-            timeout (int, optional): Overrides default timeout used when querying the graphql API. Specified in seconds.
+            timeout (int, optional): Overrides default timeout used when querying the GraphQL API. Specified in seconds.

         Returns:
             ProcessRelationsNodeSync: A TypedDict containing two lists:
@@ -2084,7 +2087,7 @@ class InfrahubClientSync(BaseClient):
             kind (str): kind of the nodes to query
             at (Timestamp, optional): Time of the query. Defaults to Now.
             branch (str, optional): Name of the branch to query from. Defaults to default_branch.
-            timeout (int, optional): Overrides default timeout used when querying the graphql API. Specified in seconds.
+            timeout (int, optional): Overrides default timeout used when querying the GraphQL API. Specified in seconds.
             populate_store (bool, optional): Flag to indicate whether to populate the store with the retrieved nodes.
             offset (int, optional): The offset for pagination.
             limit (int, optional): The limit for pagination.
@@ -2913,7 +2916,7 @@ class InfrahubClientSync(BaseClient):

         with httpx.Client(
             **proxy_config,
-            verify=self.config.tls_ca_file if self.config.tls_ca_file else not self.config.tls_insecure,
+            verify=self.config.tls_context,
         ) as client:
             try:
                 response = client.request(