infrahub-server 1.4.11__py3-none-any.whl → 1.4.13__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in their public registry.
@@ -233,11 +233,15 @@ class NodeCreateAllQuery(NodeQuery):
  ipnetwork_prop_list = [f"{key}: {value}" for key, value in ipnetwork_prop.items()]

  attrs_nonindexed_query = """
- WITH distinct n
+ WITH DISTINCT n
  UNWIND $attrs AS attr
  // Try to find a matching vertex
- OPTIONAL MATCH (existing_av:AttributeValue {value: attr.content.value, is_default: attr.content.is_default})
- WHERE NOT existing_av:AttributeValueIndexed
+ CALL (attr) {
+ OPTIONAL MATCH (existing_av:AttributeValue {value: attr.content.value, is_default: attr.content.is_default})
+ WHERE NOT existing_av:AttributeValueIndexed
+ RETURN existing_av
+ LIMIT 1
+ }
  CALL (attr, existing_av) {
  // If none found, create a new one
  WITH existing_av
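The hunk above changes how NodeCreateAllQuery looks up reusable, non-indexed AttributeValue vertices: the OPTIONAL MATCH now lives in a scoped CALL subquery capped by LIMIT 1, so at most one existing vertex is carried forward per attribute and duplicate value vertices can no longer multiply the rows feeding the next CALL block. A minimal sketch of the same pattern as a standalone query, assuming a Neo4j-style driver session; the helper name and the RETURN shape are illustrative, only the labels and properties mirror the query above.

# Sketch: constrain an OPTIONAL MATCH to at most one row per input with a scoped
# CALL subquery (requires a Cypher dialect that supports `CALL (var) { ... }`).
FIND_EXISTING_VALUES = """
UNWIND $attrs AS attr
CALL (attr) {
    OPTIONAL MATCH (existing_av:AttributeValue {value: attr.content.value, is_default: attr.content.is_default})
    WHERE NOT existing_av:AttributeValueIndexed
    RETURN existing_av
    LIMIT 1
}
RETURN attr.name AS name, existing_av
"""

def find_existing_values(session, attrs: list[dict]) -> dict:
    """Return the single reusable AttributeValue vertex (or None) per attribute name."""
    result = session.run(FIND_EXISTING_VALUES, attrs=attrs)
    return {record["name"]: record["existing_av"] for record in result}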
@@ -623,45 +627,46 @@ class NodeListGetAttributeQuery(Query):
  self.add_to_query(query)

  query = """
- CALL (n, a) {
- MATCH (n)-[r:HAS_ATTRIBUTE]-(a:Attribute)
- WHERE %(branch_filter)s
- RETURN n as n1, r as r1, a as a1
- ORDER BY r.branch_level DESC, r.from DESC
- LIMIT 1
- }
- WITH n1 as n, r1, a1 as a
- WHERE r1.status = "active"
- WITH n, r1, a
- MATCH (a)-[r:HAS_VALUE]-(av:AttributeValue)
- WHERE %(branch_filter)s
- CALL (a, av) {
- MATCH (a)-[r:HAS_VALUE]-(av:AttributeValue)
- WHERE %(branch_filter)s
- RETURN a as a1, r as r2, av as av1
- ORDER BY r.branch_level DESC, r.from DESC
- LIMIT 1
- }
- WITH n, r1, a1 as a, r2, av1 as av
- WHERE r2.status = "active"
- WITH n, a, av, r1, r2
+ CALL (n, a) {
+ MATCH (n)-[r:HAS_ATTRIBUTE]->(a:Attribute)
+ WHERE %(branch_filter)s
+ RETURN r.status = "active" AS is_active
+ ORDER BY r.branch_level DESC, r.from DESC, r.status ASC
+ LIMIT 1
+ }
+ WITH n, a
+ WHERE is_active = TRUE
+ CALL (a) {
+ MATCH (a)-[r:HAS_VALUE]->(av:AttributeValue)
+ WHERE %(branch_filter)s
+ RETURN av, r AS r2
+ ORDER BY r.branch_level DESC, r.from DESC, r.status ASC
+ LIMIT 1
+ }
+ WITH n, a, av, r2
+ WHERE r2.status = "active"
  """ % {"branch_filter": branch_filter}
  self.add_to_query(query)

- self.return_labels = ["n", "a", "av", "r1", "r2"]
+ self.return_labels = ["n", "a", "av", "r2"]

  # Add Is_Protected and Is_visible
- rel_isv_branch_filter, _ = self.branch.get_query_filter_path(
- at=self.at, branch_agnostic=self.branch_agnostic, variable_name="rel_isv"
- )
- rel_isp_branch_filter, _ = self.branch.get_query_filter_path(
- at=self.at, branch_agnostic=self.branch_agnostic, variable_name="rel_isp"
- )
  query = """
- MATCH (a)-[rel_isv:IS_VISIBLE]-(isv:Boolean)
- MATCH (a)-[rel_isp:IS_PROTECTED]-(isp:Boolean)
- WHERE (%(rel_isv_branch_filter)s) AND (%(rel_isp_branch_filter)s)
- """ % {"rel_isv_branch_filter": rel_isv_branch_filter, "rel_isp_branch_filter": rel_isp_branch_filter}
+ CALL (a) {
+ MATCH (a)-[r:IS_VISIBLE]-(isv:Boolean)
+ WHERE (%(branch_filter)s)
+ RETURN r AS rel_isv, isv
+ ORDER BY rel_isv.branch_level DESC, rel_isv.from DESC, rel_isv.status ASC
+ LIMIT 1
+ }
+ CALL (a) {
+ MATCH (a)-[r:IS_PROTECTED]-(isp:Boolean)
+ WHERE (%(branch_filter)s)
+ RETURN r AS rel_isp, isp
+ ORDER BY rel_isp.branch_level DESC, rel_isp.from DESC, rel_isp.status ASC
+ LIMIT 1
+ }
+ """ % {"branch_filter": branch_filter}
  self.add_to_query(query)

  self.return_labels.extend(["isv", "isp", "rel_isv", "rel_isp"])
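The rewritten NodeListGetAttributeQuery no longer joins every HAS_ATTRIBUTE/HAS_VALUE edge and filters afterwards; each CALL subquery orders the candidate edges by branch level, timestamp and status, keeps exactly one, and only rows whose winning edge is still active survive. A minimal sketch of that selection rule in plain Python, assuming edges are dicts with branch_level, from_time and status keys (the field names are illustrative).

# Sketch of the per-attribute edge selection: deepest branch first, newest first,
# "active" before "deleted" when everything else is equal; the real query applies
# the same ordering inside a CALL subquery with LIMIT 1.
def most_relevant_edge(edges: list[dict]) -> dict | None:
    if not edges:
        return None
    ordered = sorted(
        edges,
        key=lambda e: (-e["branch_level"], -e["from_time"], e["status"]),
    )
    return ordered[0]

def is_visible(edges: list[dict]) -> bool:
    """The attribute is kept only if the winning edge is still active."""
    edge = most_relevant_edge(edges)
    return edge is not None and edge["status"] == "active"

# Example: a newer 'deleted' edge on the same branch hides the attribute.
edges = [
    {"branch_level": 1, "from_time": 10, "status": "active"},
    {"branch_level": 1, "from_time": 20, "status": "deleted"},
]
assert is_visible(edges) is False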
@@ -874,6 +879,7 @@ class NodeListGetRelationshipsQuery(Query):
  RETURN DISTINCT n_uuid, rel_name, peer_uuid, direction
  """ % {"filters": rels_filter}
  self.add_to_query(query)
+ self.order_by = ["n_uuid", "rel_name", "peer_uuid", "direction"]
  self.return_labels = ["n_uuid", "rel_name", "peer_uuid", "direction"]

  def get_peers_group_by_node(self) -> GroupedPeerNodes:
infrahub/git/base.py CHANGED
@@ -932,7 +932,10 @@ class InfrahubRepositoryBase(BaseModel, ABC):
  def _raise_enriched_error_static(
  error: GitCommandError, name: str, location: str, branch_name: str | None = None
  ) -> NoReturn:
- if "Repository not found" in error.stderr or "does not appear to be a git" in error.stderr:
+ if any(
+ err in error.stderr
+ for err in ("Repository not found", "does not appear to be a git", "Failed to connect to")
+ ):
  raise RepositoryConnectionError(identifier=name) from error

  if "error: pathspec" in error.stderr:
infrahub/git/tasks.py CHANGED
@@ -1,3 +1,5 @@
+ from typing import Any
+
  from infrahub_sdk import InfrahubClient
  from infrahub_sdk.protocols import (
  CoreArtifact,
@@ -14,7 +16,12 @@ from prefect.logging import get_run_logger

  from infrahub import lock
  from infrahub.context import InfrahubContext
- from infrahub.core.constants import InfrahubKind, RepositoryInternalStatus, ValidatorConclusion
+ from infrahub.core.constants import (
+ InfrahubKind,
+ RepositoryInternalStatus,
+ RepositoryOperationalStatus,
+ ValidatorConclusion,
+ )
  from infrahub.core.manager import NodeManager
  from infrahub.core.registry import registry
  from infrahub.exceptions import CheckError, RepositoryError
@@ -152,6 +159,39 @@ async def create_branch(branch: str, branch_id: str) -> None:
  pass


+ @flow(name="sync-git-repo-with-origin", flow_run_name="Sync git repo with origin")
+ async def sync_git_repo_with_origin_and_tag_on_failure(
+ client: InfrahubClient,
+ repository_id: str,
+ repository_name: str,
+ repository_location: str,
+ internal_status: str,
+ default_branch_name: str,
+ operational_status: str,
+ staging_branch: str | None = None,
+ infrahub_branch: str | None = None,
+ ) -> None:
+ repo = await InfrahubRepository.init(
+ id=repository_id,
+ name=repository_name,
+ location=repository_location,
+ client=client,
+ internal_status=internal_status,
+ default_branch_name=default_branch_name,
+ )
+
+ try:
+ await repo.sync(staging_branch=staging_branch)
+ except RepositoryError:
+ if operational_status == RepositoryOperationalStatus.ONLINE.value:
+ params: dict[str, Any] = {
+ "branches": [infrahub_branch] if infrahub_branch else [],
+ "nodes": [str(repository_id)],
+ }
+ await add_tags(**params)
+ raise
+
+
  @flow(name="git_repositories_sync", flow_run_name="Sync Git Repositories")
  async def sync_remote_repositories() -> None:
  log = get_run_logger()
@@ -204,7 +244,17 @@ async def sync_remote_repositories() -> None:
  continue

  try:
- await repo.sync(staging_branch=staging_branch)
+ await sync_git_repo_with_origin_and_tag_on_failure(
+ client=client,
+ repository_id=repository_data.repository.id,
+ repository_name=repository_data.repository.name.value,
+ repository_location=repository_data.repository.location.value,
+ internal_status=active_internal_status,
+ default_branch_name=repository_data.repository.default_branch.value,
+ operational_status=repository_data.repository.operational_status.value,
+ staging_branch=staging_branch,
+ infrahub_branch=infrahub_branch,
+ )
  # Tell workers to fetch to stay in sync
  message = messages.RefreshGitFetch(
  meta=Meta(initiator_id=WORKER_IDENTITY, request_id=get_log_data().get("request_id", "")),
@@ -312,6 +312,13 @@ class GraphQLQueryReport:
  return []

  def required_argument(self, argument: GraphQLArgument) -> bool:
+ if argument.name == "ids" and argument.kind == "list_value":
+ for variable in self.variables:
+ if f"['${variable.name}']" == argument.as_variable_name and variable.required:
+ return True
+
+ return False
+
  if not argument.is_variable:
  # If the argument isn't a variable it would have been
  # statically defined in the input and as such required
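The new branch treats a filter such as ids: ["$ids"] as required when the single list entry is bound to a required query variable. A minimal sketch reproducing that comparison; the real GraphQLArgument and variable objects are not shown in this diff, so the stand-in dataclasses and their field names are assumptions.

# Sketch: a list-valued `ids` filter counts as required when its single entry is a
# required query variable, e.g. query($ids: [ID!]!) { ... (ids: [$ids]) ... }.
from dataclasses import dataclass

@dataclass
class Variable:
    name: str
    required: bool

@dataclass
class Argument:
    name: str
    kind: str
    as_variable_name: str  # rendering of the argument value, e.g. "['$ids']"

def ids_argument_is_required(argument: Argument, variables: list[Variable]) -> bool:
    if argument.name == "ids" and argument.kind == "list_value":
        return any(
            f"['${variable.name}']" == argument.as_variable_name and variable.required
            for variable in variables
        )
    return False

# Example
arg = Argument(name="ids", kind="list_value", as_variable_name="['$ids']")
assert ids_argument_is_required(arg, [Variable(name="ids", required=True)])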
@@ -364,6 +371,8 @@ class GraphQLQueryReport:
  if [[argument.name]] == query.infrahub_model.uniqueness_constraints:
  if self.required_argument(argument=argument):
  targets_single_query = True
+ elif argument.name == "ids" and self.required_argument(argument=argument):
+ targets_single_query = True

  if not targets_single_query:
  return False
@@ -787,10 +787,7 @@ class GraphQLSchemaManager:
  attr_kind = get_attr_kind(schema, attr)
  attr_type = get_attribute_type(kind=attr_kind).get_graphql_update()

- # A Field is not required if explicitly indicated or if a default value has been provided
- required = not attr.optional if not attr.default_value else False
-
- attrs[attr.name] = graphene.InputField(attr_type, required=required, description=attr.description)
+ attrs[attr.name] = graphene.InputField(attr_type, description=attr.description)

  for rel in schema.relationships:
  if rel.internal_peer or rel.read_only:
@@ -798,14 +795,11 @@ class GraphQLSchemaManager:

  input_type = self._get_related_input_type(relationship=rel)

- required = not rel.optional
  if rel.cardinality == RelationshipCardinality.ONE:
- attrs[rel.name] = graphene.InputField(input_type, required=required, description=rel.description)
+ attrs[rel.name] = graphene.InputField(input_type, description=rel.description)

  elif rel.cardinality == RelationshipCardinality.MANY:
- attrs[rel.name] = graphene.InputField(
- graphene.List(input_type), required=required, description=rel.description
- )
+ attrs[rel.name] = graphene.InputField(graphene.List(input_type), description=rel.description)

  return type(f"{schema.kind}UpsertInput", (graphene.InputObjectType,), attrs)

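Taken together, the two hunks above drop the required= flag from every attribute and relationship field of the generated <Kind>UpsertInput types, so an upsert payload only needs to carry the fields it actually changes. A minimal graphene sketch of the resulting shape; the kind, field names and mutation name are illustrative, not Infrahub's actual schema.

import graphene

class TextAttributeUpdate(graphene.InputObjectType):
    value = graphene.String()

class DeviceUpsertInput(graphene.InputObjectType):
    # Previously some of these fields carried required=True (derived from the
    # node schema); now every field on the upsert input is optional.
    name = graphene.InputField(TextAttributeUpdate, description="Device name")
    description = graphene.InputField(TextAttributeUpdate, description="Free-form description")

# An upsert document may now omit untouched fields entirely (mutation name illustrative):
UPSERT_MUTATION = """
mutation {
  InfraDeviceUpsert(data: { name: { value: "spine-01" } }) { ok }
}
"""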
@@ -92,6 +92,7 @@ class ProposedChangeArtifactDefinition(BaseModel):
  query_name: str # Deprecated
  query_id: str
  query_models: list[str]
+ query_payload: str = Field(..., description="GraphQL query")
  repository_id: str
  transform_kind: str
  template_path: str = Field(default="")
@@ -11,6 +11,7 @@ import pytest
  from infrahub_sdk.exceptions import ModuleImportError
  from infrahub_sdk.node import InfrahubNode
  from infrahub_sdk.protocols import (
+ CoreArtifactDefinition,
  CoreArtifactValidator,
  CoreGeneratorDefinition,
  CoreGeneratorValidator,
@@ -44,7 +45,7 @@ from infrahub.core.diff.model.diff import DiffElementType, SchemaConflict
  from infrahub.core.diff.model.path import NodeDiffFieldSummary
  from infrahub.core.integrity.object_conflict.conflict_recorder import ObjectConflictValidatorRecorder
  from infrahub.core.manager import NodeManager
- from infrahub.core.protocols import CoreArtifactDefinition, CoreDataCheck, CoreValidator
+ from infrahub.core.protocols import CoreDataCheck, CoreValidator
  from infrahub.core.protocols import CoreProposedChange as InternalCoreProposedChange
  from infrahub.core.timestamp import Timestamp
  from infrahub.core.validators.checks_runner import run_checks_and_update_validator
@@ -59,6 +60,8 @@ from infrahub.git.base import extract_repo_file_information
  from infrahub.git.models import TriggerRepositoryInternalChecks, TriggerRepositoryUserChecks
  from infrahub.git.repository import InfrahubRepository, get_initialized_repo
  from infrahub.git.utils import fetch_artifact_definition_targets, fetch_proposed_change_generator_definition_targets
+ from infrahub.graphql.analyzer import InfrahubGraphQLQueryAnalyzer
+ from infrahub.graphql.initialization import prepare_graphql_params
  from infrahub.log import get_logger
  from infrahub.message_bus.types import (
  ProposedChangeArtifactDefinition,
@@ -664,6 +667,27 @@ async def validate_artifacts_generation(model: RequestArtifactDefinitionCheck, c
  repository = model.branch_diff.get_repository(repository_id=model.artifact_definition.repository_id)
  impacted_artifacts = model.branch_diff.get_subscribers_ids(kind=InfrahubKind.ARTIFACT)

+ source_schema_branch = registry.schema.get_schema_branch(name=model.source_branch)
+ source_branch = registry.get_branch_from_registry(branch=model.source_branch)
+
+ graphql_params = await prepare_graphql_params(db=await get_database(), branch=model.source_branch)
+ query_analyzer = InfrahubGraphQLQueryAnalyzer(
+ query=model.artifact_definition.query_payload,
+ branch=source_branch,
+ schema_branch=source_schema_branch,
+ schema=graphql_params.schema,
+ )
+
+ only_has_unique_targets = query_analyzer.query_report.only_has_unique_targets
+ if not only_has_unique_targets:
+ log.warning(
+ f"Artifact definition {artifact_definition.name.value} query does not guarantee unique targets. All targets will be processed."
+ )
+
+ managed_branch = model.source_branch_sync_with_git and model.branch_diff.has_file_modifications
+ if managed_branch:
+ log.info("Source branch is synced with Git repositories with updates, all artifacts will be processed")
+
  checks = []

  for relationship in group.members.peers:
@@ -671,8 +695,9 @@ async def validate_artifacts_generation(model: RequestArtifactDefinitionCheck, c
  artifact_id = artifacts_by_member.get(member.id)
  if _should_render_artifact(
  artifact_id=artifact_id,
- managed_branch=model.source_branch_sync_with_git,
+ managed_branch=managed_branch,
  impacted_artifacts=impacted_artifacts,
+ only_has_unique_targets=only_has_unique_targets,
  ):
  log.info(f"Trigger Artifact processing for {member.display_label}")

@@ -718,21 +743,26 @@ async def validate_artifacts_generation(model: RequestArtifactDefinitionCheck, c
  )


- def _should_render_artifact(artifact_id: str | None, managed_branch: bool, impacted_artifacts: list[str]) -> bool: # noqa: ARG001
+ def _should_render_artifact(
+ artifact_id: str | None,
+ managed_branch: bool,
+ impacted_artifacts: list[str],
+ only_has_unique_targets: bool,
+ ) -> bool:
  """Returns a boolean to indicate if an artifact should be generated or not.
  Will return true if:
  * The artifact_id wasn't set which could be that it's a new object that doesn't have a previous artifact
- * The source brance is not data only which would indicate that it could contain updates in git to the transform
+ * The source branch is not data only which would indicate that it could contain updates in git to the transform
  * The artifact_id exists in the impacted_artifacts list
+ * The query failes the only_has_unique_targets check
  Will return false if:
  * The source branch is a data only branch and the artifact_id exists and is not in the impacted list
  """

- # if not artifact_id or managed_branch:
- # return True
- # return artifact_id in impacted_artifacts
- # Temporary workaround tracked in https://github.com/opsmill/infrahub/issues/4991
- return True
+ if not only_has_unique_targets or not artifact_id or managed_branch:
+ return True
+
+ return artifact_id in impacted_artifacts


  @flow(
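With the temporary workaround for issue #4991 removed, _should_render_artifact now applies real filtering: an artifact is always re-rendered when the definition's query cannot guarantee unique targets, when no previous artifact exists, or when the source branch carries Git file modifications; otherwise it is re-rendered only if the existing artifact appears in the impacted list. A minimal sketch mirroring that decision with a few sample outcomes (the argument values are illustrative).

# Sketch mirroring the new decision logic shown in the hunk above.
def should_render_artifact(
    artifact_id: str | None,
    managed_branch: bool,
    impacted_artifacts: list[str],
    only_has_unique_targets: bool,
) -> bool:
    if not only_has_unique_targets or not artifact_id or managed_branch:
        return True
    return artifact_id in impacted_artifacts

# Query cannot guarantee unique targets -> always render.
assert should_render_artifact("a1", False, [], only_has_unique_targets=False)
# Unique targets, existing artifact, data-only branch, not impacted -> skip.
assert not should_render_artifact("a1", False, ["a2"], only_has_unique_targets=True)
# Unique targets but the branch carries Git file modifications -> render.
assert should_render_artifact("a1", True, [], only_has_unique_targets=True)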
@@ -1249,6 +1279,9 @@ query GatherArtifactDefinitions {
  name {
  value
  }
+ query {
+ value
+ }
  }
  }
  ... on CoreTransformJinja2 {
@@ -1466,6 +1499,7 @@ def _parse_artifact_definitions(definitions: list[dict]) -> list[ProposedChangeA
  query_name=definition["node"]["transformation"]["node"]["query"]["node"]["name"]["value"],
  query_id=definition["node"]["transformation"]["node"]["query"]["node"]["id"],
  query_models=definition["node"]["transformation"]["node"]["query"]["node"]["models"]["value"] or [],
+ query_payload=definition["node"]["transformation"]["node"]["query"]["node"]["query"]["value"],
  repository_id=definition["node"]["transformation"]["node"]["repository"]["node"]["id"],
  transform_kind=definition["node"]["transformation"]["node"]["__typename"],
  )
@@ -3,10 +3,15 @@ from __future__ import annotations
  from typing import TYPE_CHECKING, Any

  if TYPE_CHECKING:
+ import ssl
+
  import httpx


  class InfrahubHTTP:
+ def verify_tls(self, verify: bool | None = None) -> bool | ssl.SSLContext:
+ raise NotImplementedError()
+
  async def get(
  self,
  url: str,
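The abstract InfrahubHTTP adapter gains a verify_tls() hook that concrete adapters override to return either a boolean or a ready-made ssl.SSLContext, which the worker code further down passes to Prefect's httpx client as its verify setting. A minimal sketch of one possible override; the settings fields (tls_insecure, tls_ca_bundle) and the adapter class are assumptions, not the real HttpxAdapter, which is not part of this diff.

# Sketch: a hypothetical adapter building and caching an SSLContext for verify_tls().
import ssl
from dataclasses import dataclass

@dataclass
class TLSSettings:
    tls_insecure: bool = False          # assumed setting name
    tls_ca_bundle: str | None = None    # assumed setting name

class ExampleHTTPAdapter:
    def __init__(self, settings: TLSSettings) -> None:
        self._settings = settings
        self._ssl_context: ssl.SSLContext | None = None

    def verify_tls(self, verify: bool | None = None) -> bool | ssl.SSLContext:
        # An explicit override from the caller wins.
        if verify is not None:
            return verify
        if self._settings.tls_insecure:
            return False
        # Build (and cache) an SSLContext once; httpx accepts it as `verify=`.
        if self._ssl_context is None:
            self._ssl_context = ssl.create_default_context(cafile=self._settings.tls_ca_bundle)
        return self._ssl_context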
@@ -3,8 +3,10 @@ from __future__ import annotations
  from typing import TYPE_CHECKING, Any, overload

  from prefect.client.schemas.objects import StateType
+ from prefect.context import AsyncClientContext
  from prefect.deployments import run_deployment

+ from infrahub.services.adapters.http.httpx import HttpxAdapter
  from infrahub.workers.utils import inject_context_parameter
  from infrahub.workflows.initialization import setup_task_manager
  from infrahub.workflows.models import WorkflowInfo
@@ -19,6 +21,11 @@ if TYPE_CHECKING:


  class WorkflowWorkerExecution(InfrahubWorkflow):
+ # This is required to grab a cached SSLContext from the HttpAdapter.
+ # We cannot use the get_http() dependency since it introduces a circular dependency.
+ # We could remove this later on by introducing a cached SSLContext outside of this adapter.
+ _http_adapter = HttpxAdapter()
+
  @staticmethod
  async def initialize(component_is_primary_server: bool) -> None:
  if component_is_primary_server:
@@ -79,5 +86,6 @@ class WorkflowWorkerExecution(InfrahubWorkflow):
  parameters = dict(parameters) if parameters is not None else {}
  inject_context_parameter(func=flow_func, parameters=parameters, context=context)

- flow_run = await run_deployment(name=workflow.full_name, timeout=0, parameters=parameters or {}, tags=tags) # type: ignore[return-value, misc]
+ async with AsyncClientContext(httpx_settings={"verify": self._http_adapter.verify_tls()}):
+ flow_run = await run_deployment(name=workflow.full_name, timeout=0, parameters=parameters or {}, tags=tags) # type: ignore[return-value, misc]
  return WorkflowInfo.from_flow(flow_run=flow_run)
@@ -1,7 +1,10 @@
+ import asyncio
  import uuid
+ from datetime import datetime, timedelta, timezone
  from typing import Any
  from uuid import UUID

+ from prefect import State
  from prefect.client.orchestration import PrefectClient, get_client
  from prefect.client.schemas.filters import (
  ArtifactFilter,
@@ -12,6 +15,7 @@ from prefect.client.schemas.filters import (
  FlowRunFilter,
  FlowRunFilterId,
  FlowRunFilterName,
+ FlowRunFilterStartTime,
  FlowRunFilterState,
  FlowRunFilterStateType,
  FlowRunFilterTags,
@@ -311,3 +315,72 @@ class PrefectTask:
  )

  return {"count": count or 0, "edges": nodes}
+
+ @classmethod
+ async def delete_flow_runs(
+ cls,
+ states: list[StateType] = [StateType.COMPLETED, StateType.FAILED, StateType.CANCELLED], # noqa: B006
+ delete: bool = True,
+ days_to_keep: int = 2,
+ batch_size: int = 100,
+ ) -> None:
+ """Delete flow runs in the specified states and older than specified days."""
+
+ logger = get_logger()
+
+ async with get_client(sync_client=False) as client:
+ cutoff = datetime.now(timezone.utc) - timedelta(days=days_to_keep)
+
+ flow_run_filter = FlowRunFilter(
+ start_time=FlowRunFilterStartTime(before_=cutoff), # type: ignore[arg-type]
+ state=FlowRunFilterState(type=FlowRunFilterStateType(any_=states)),
+ )
+
+ # Get flow runs to delete
+ flow_runs = await client.read_flow_runs(flow_run_filter=flow_run_filter, limit=batch_size)
+
+ deleted_total = 0
+
+ while True:
+ batch_deleted = 0
+ failed_deletes = []
+
+ # Delete each flow run through the API
+ for flow_run in flow_runs:
+ try:
+ if delete:
+ await client.delete_flow_run(flow_run_id=flow_run.id)
+ else:
+ await client.set_flow_run_state(
+ flow_run_id=flow_run.id,
+ state=State(type=StateType.CRASHED),
+ force=True,
+ )
+ deleted_total += 1
+ batch_deleted += 1
+ except Exception as e:
+ logger.warning(f"Failed to delete flow run {flow_run.id}: {e}")
+ failed_deletes.append(flow_run.id)
+
+ # Rate limiting
+ if batch_deleted % 10 == 0:
+ await asyncio.sleep(0.5)
+
+ logger.info(f"Delete {batch_deleted}/{len(flow_runs)} flow runs (total: {deleted_total})")
+
+ # Get next batch
+ previous_flow_run_ids = [fr.id for fr in flow_runs]
+ flow_runs = await client.read_flow_runs(flow_run_filter=flow_run_filter, limit=batch_size)
+
+ if not flow_runs:
+ logger.info("No more flow runs to delete")
+ break
+
+ if previous_flow_run_ids == [fr.id for fr in flow_runs]:
+ logger.info("Found same flow runs to delete, aborting")
+ break
+
+ # Delay between batches to avoid overwhelming the API
+ await asyncio.sleep(1.0)
+
+ logger.info(f"Retention complete. Total deleted tasks: {deleted_total}")
@@ -8,6 +8,7 @@ from infrahub_sdk import Config, InfrahubClient
  from infrahub_sdk.exceptions import Error as SdkError
  from prefect import settings as prefect_settings
  from prefect.client.schemas.objects import FlowRun
+ from prefect.context import AsyncClientContext
  from prefect.flow_engine import run_flow_async
  from prefect.logging.handlers import APILogHandler
  from prefect.workers.base import BaseJobConfiguration, BaseVariables, BaseWorker, BaseWorkerResult
@@ -27,6 +28,7 @@ from infrahub.workers.dependencies import (
  get_cache,
  get_component,
  get_database,
+ get_http,
  get_message_bus,
  get_workflow,
  set_component_type,
@@ -154,7 +156,9 @@ class InfrahubWorkerAsync(BaseWorker):
  if task_status:
  task_status.started(True)

- await run_flow_async(flow=flow_func, flow_run=flow_run, parameters=params, return_type="state")
+ async with AsyncClientContext(httpx_settings={"verify": get_http().verify_tls()}) as ctx:
+ ctx._httpx_settings = None # Hack to make all child task/flow runs use the same client
+ await run_flow_async(flow=flow_func, flow_run=flow_run, parameters=params, return_type="state")

  return InfrahubWorkerAsyncResult(status_code=0, identifier=str(flow_run.id))

@@ -9,6 +9,7 @@ from prefect.runtime import flow_run
  from infrahub.core.constants import GLOBAL_BRANCH_NAME
  from infrahub.core.registry import registry
  from infrahub.tasks.registry import refresh_branches
+ from infrahub.workers.dependencies import get_http

  from .constants import TAG_NAMESPACE, WorkflowTag

@@ -26,7 +27,7 @@ async def add_tags(
  namespace: bool = True,
  db_change: bool = False,
  ) -> None:
- client = get_client(sync_client=False)
+ client = get_client(httpx_settings={"verify": get_http().verify_tls()}, sync_client=False)
  current_flow_run_id = flow_run.id
  current_tags: list[str] = flow_run.tags
  branch_tags = (
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: infrahub-server
- Version: 1.4.11
+ Version: 1.4.13
  Summary: Infrahub is taking a new approach to Infrastructure Management by providing a new generation of datastore to organize and control all the data that defines how an infrastructure should run.
  License: Apache-2.0
  Author: OpsMill