infrahub-server 1.5.0b1__py3-none-any.whl → 1.5.1__py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (171)
  1. infrahub/api/dependencies.py +4 -13
  2. infrahub/api/internal.py +2 -0
  3. infrahub/api/oauth2.py +13 -19
  4. infrahub/api/oidc.py +15 -21
  5. infrahub/api/schema.py +24 -3
  6. infrahub/api/transformation.py +22 -20
  7. infrahub/artifacts/models.py +2 -1
  8. infrahub/auth.py +137 -3
  9. infrahub/cli/__init__.py +2 -0
  10. infrahub/cli/db.py +158 -155
  11. infrahub/cli/dev.py +118 -0
  12. infrahub/cli/tasks.py +46 -0
  13. infrahub/cli/upgrade.py +56 -9
  14. infrahub/computed_attribute/tasks.py +20 -8
  15. infrahub/core/attribute.py +10 -2
  16. infrahub/core/branch/enums.py +1 -1
  17. infrahub/core/branch/models.py +7 -3
  18. infrahub/core/branch/tasks.py +68 -7
  19. infrahub/core/constants/__init__.py +3 -0
  20. infrahub/core/diff/calculator.py +2 -2
  21. infrahub/core/diff/query/artifact.py +1 -0
  22. infrahub/core/diff/query/delete_query.py +9 -5
  23. infrahub/core/diff/query/field_summary.py +1 -0
  24. infrahub/core/diff/query/merge.py +39 -23
  25. infrahub/core/graph/__init__.py +1 -1
  26. infrahub/core/initialization.py +5 -2
  27. infrahub/core/migrations/__init__.py +3 -0
  28. infrahub/core/migrations/exceptions.py +4 -0
  29. infrahub/core/migrations/graph/__init__.py +12 -13
  30. infrahub/core/migrations/graph/load_schema_branch.py +21 -0
  31. infrahub/core/migrations/graph/m013_convert_git_password_credential.py +1 -1
  32. infrahub/core/migrations/graph/m037_index_attr_vals.py +11 -30
  33. infrahub/core/migrations/graph/m039_ipam_reconcile.py +9 -7
  34. infrahub/core/migrations/graph/m040_duplicated_attributes.py +81 -0
  35. infrahub/core/migrations/graph/m041_deleted_dup_edges.py +149 -0
  36. infrahub/core/migrations/graph/m042_profile_attrs_in_db.py +147 -0
  37. infrahub/core/migrations/graph/m043_create_hfid_display_label_in_db.py +164 -0
  38. infrahub/core/migrations/graph/m044_backfill_hfid_display_label_in_db.py +864 -0
  39. infrahub/core/migrations/query/__init__.py +7 -8
  40. infrahub/core/migrations/query/attribute_add.py +8 -6
  41. infrahub/core/migrations/query/attribute_remove.py +134 -0
  42. infrahub/core/migrations/runner.py +54 -0
  43. infrahub/core/migrations/schema/attribute_kind_update.py +9 -3
  44. infrahub/core/migrations/schema/attribute_supports_profile.py +90 -0
  45. infrahub/core/migrations/schema/node_attribute_add.py +30 -2
  46. infrahub/core/migrations/schema/node_attribute_remove.py +13 -109
  47. infrahub/core/migrations/schema/node_kind_update.py +2 -1
  48. infrahub/core/migrations/schema/node_remove.py +2 -1
  49. infrahub/core/migrations/schema/placeholder_dummy.py +3 -2
  50. infrahub/core/migrations/shared.py +62 -14
  51. infrahub/core/models.py +2 -2
  52. infrahub/core/node/__init__.py +42 -12
  53. infrahub/core/node/create.py +46 -63
  54. infrahub/core/node/lock_utils.py +70 -44
  55. infrahub/core/node/resource_manager/ip_address_pool.py +2 -1
  56. infrahub/core/node/resource_manager/ip_prefix_pool.py +2 -1
  57. infrahub/core/node/resource_manager/number_pool.py +2 -1
  58. infrahub/core/query/attribute.py +55 -0
  59. infrahub/core/query/diff.py +61 -16
  60. infrahub/core/query/ipam.py +16 -4
  61. infrahub/core/query/node.py +51 -43
  62. infrahub/core/query/relationship.py +1 -0
  63. infrahub/core/relationship/model.py +10 -5
  64. infrahub/core/schema/__init__.py +56 -0
  65. infrahub/core/schema/attribute_schema.py +4 -0
  66. infrahub/core/schema/definitions/core/check.py +1 -1
  67. infrahub/core/schema/definitions/core/transform.py +1 -1
  68. infrahub/core/schema/definitions/internal.py +2 -2
  69. infrahub/core/schema/generated/attribute_schema.py +2 -2
  70. infrahub/core/schema/manager.py +22 -1
  71. infrahub/core/schema/schema_branch.py +180 -22
  72. infrahub/core/schema/schema_branch_display.py +12 -0
  73. infrahub/core/schema/schema_branch_hfid.py +6 -0
  74. infrahub/core/validators/uniqueness/checker.py +2 -1
  75. infrahub/database/__init__.py +0 -13
  76. infrahub/database/graph.py +21 -0
  77. infrahub/display_labels/tasks.py +13 -7
  78. infrahub/events/branch_action.py +27 -1
  79. infrahub/generators/tasks.py +3 -7
  80. infrahub/git/base.py +4 -1
  81. infrahub/git/integrator.py +1 -1
  82. infrahub/git/models.py +2 -1
  83. infrahub/git/repository.py +22 -5
  84. infrahub/git/tasks.py +66 -10
  85. infrahub/git/utils.py +123 -1
  86. infrahub/graphql/analyzer.py +9 -0
  87. infrahub/graphql/api/endpoints.py +14 -4
  88. infrahub/graphql/manager.py +4 -9
  89. infrahub/graphql/mutations/branch.py +5 -0
  90. infrahub/graphql/mutations/convert_object_type.py +11 -1
  91. infrahub/graphql/mutations/display_label.py +17 -10
  92. infrahub/graphql/mutations/hfid.py +17 -10
  93. infrahub/graphql/mutations/ipam.py +54 -35
  94. infrahub/graphql/mutations/main.py +27 -28
  95. infrahub/graphql/mutations/proposed_change.py +6 -0
  96. infrahub/graphql/schema_sort.py +170 -0
  97. infrahub/graphql/types/branch.py +4 -1
  98. infrahub/graphql/types/enums.py +3 -0
  99. infrahub/hfid/tasks.py +13 -7
  100. infrahub/lock.py +52 -12
  101. infrahub/message_bus/types.py +3 -1
  102. infrahub/permissions/constants.py +2 -0
  103. infrahub/profiles/queries/get_profile_data.py +4 -5
  104. infrahub/proposed_change/tasks.py +66 -23
  105. infrahub/server.py +6 -2
  106. infrahub/services/__init__.py +2 -2
  107. infrahub/services/adapters/http/__init__.py +5 -0
  108. infrahub/services/adapters/workflow/worker.py +14 -3
  109. infrahub/task_manager/event.py +5 -0
  110. infrahub/task_manager/models.py +7 -0
  111. infrahub/task_manager/task.py +73 -0
  112. infrahub/trigger/setup.py +13 -4
  113. infrahub/trigger/tasks.py +3 -0
  114. infrahub/workers/dependencies.py +10 -1
  115. infrahub/workers/infrahub_async.py +10 -2
  116. infrahub/workflows/catalogue.py +8 -0
  117. infrahub/workflows/initialization.py +5 -0
  118. infrahub/workflows/utils.py +2 -1
  119. infrahub_sdk/analyzer.py +1 -1
  120. infrahub_sdk/batch.py +2 -2
  121. infrahub_sdk/branch.py +14 -2
  122. infrahub_sdk/checks.py +1 -1
  123. infrahub_sdk/client.py +15 -14
  124. infrahub_sdk/config.py +29 -2
  125. infrahub_sdk/ctl/branch.py +3 -0
  126. infrahub_sdk/ctl/cli_commands.py +2 -0
  127. infrahub_sdk/ctl/exceptions.py +1 -1
  128. infrahub_sdk/ctl/schema.py +22 -7
  129. infrahub_sdk/ctl/task.py +110 -0
  130. infrahub_sdk/exceptions.py +18 -18
  131. infrahub_sdk/graphql/query.py +2 -2
  132. infrahub_sdk/node/attribute.py +1 -1
  133. infrahub_sdk/node/property.py +1 -1
  134. infrahub_sdk/node/related_node.py +3 -3
  135. infrahub_sdk/node/relationship.py +4 -6
  136. infrahub_sdk/object_store.py +2 -2
  137. infrahub_sdk/operation.py +1 -1
  138. infrahub_sdk/protocols_generator/generator.py +1 -1
  139. infrahub_sdk/pytest_plugin/exceptions.py +9 -9
  140. infrahub_sdk/pytest_plugin/items/base.py +1 -1
  141. infrahub_sdk/pytest_plugin/items/check.py +1 -1
  142. infrahub_sdk/pytest_plugin/items/python_transform.py +1 -1
  143. infrahub_sdk/repository.py +1 -1
  144. infrahub_sdk/schema/__init__.py +33 -5
  145. infrahub_sdk/spec/models.py +7 -0
  146. infrahub_sdk/spec/object.py +41 -102
  147. infrahub_sdk/spec/processors/__init__.py +0 -0
  148. infrahub_sdk/spec/processors/data_processor.py +10 -0
  149. infrahub_sdk/spec/processors/factory.py +34 -0
  150. infrahub_sdk/spec/processors/range_expand_processor.py +56 -0
  151. infrahub_sdk/task/exceptions.py +4 -4
  152. infrahub_sdk/task/manager.py +2 -2
  153. infrahub_sdk/task/models.py +6 -4
  154. infrahub_sdk/timestamp.py +1 -1
  155. infrahub_sdk/transfer/exporter/json.py +1 -1
  156. infrahub_sdk/transfer/importer/json.py +1 -1
  157. infrahub_sdk/transforms.py +1 -1
  158. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/METADATA +4 -2
  159. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/RECORD +168 -152
  160. infrahub_testcontainers/container.py +144 -6
  161. infrahub_testcontainers/docker-compose-cluster.test.yml +5 -0
  162. infrahub_testcontainers/docker-compose.test.yml +5 -0
  163. infrahub_testcontainers/helpers.py +19 -4
  164. infrahub_testcontainers/models.py +8 -6
  165. infrahub_testcontainers/performance_test.py +6 -4
  166. infrahub/core/migrations/graph/m040_profile_attrs_in_db.py +0 -166
  167. infrahub/core/migrations/graph/m041_create_hfid_display_label_in_db.py +0 -97
  168. infrahub/core/migrations/graph/m042_backfill_hfid_display_label_in_db.py +0 -86
  169. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/LICENSE.txt +0 -0
  170. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/WHEEL +0 -0
  171. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/entry_points.txt +0 -0
infrahub/profiles/queries/get_profile_data.py CHANGED
@@ -57,18 +57,17 @@ CALL (profile, attr) {
     ORDER BY r.branch_level DESC, r.from DESC, r.status ASC
     RETURN r.status = "active" AS is_active
 }
-WITH profile, attr, is_active
+WITH profile, attr
 WHERE is_active = TRUE
 // --------------
 // get the attribute values
 // --------------
-MATCH (attr)-[:HAS_VALUE]->(av:AttributeValue)
-WITH DISTINCT profile, attr, av
-CALL (attr, av) {
+CALL (attr) {
     MATCH (attr)-[r:HAS_VALUE]->(av)
     WHERE %(branch_filter)s
     ORDER BY r.branch_level DESC, r.from DESC, r.status ASC
-    RETURN r.status = "active" AS is_active
+    RETURN av, r.status = "active" AS is_active
+    LIMIT 1
 }
 WITH profile, attr, av
 WHERE is_active = TRUE
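The rewritten query folds duplicate `HAS_VALUE` edges into a single `CALL` subquery: edges are ordered by branch depth, timestamp, and status, and `LIMIT 1` keeps only the winning edge per attribute. The same selection rule can be sketched in Python (a toy model; `branch_level`, `from_`, and `status` stand in for the edge properties used above):

```python
# Toy model of the "ORDER BY ... LIMIT 1" edge selection performed above.
from dataclasses import dataclass


@dataclass
class Edge:
    value: str
    branch_level: int
    from_: str   # ISO timestamp, stand-in for the r.from property
    status: str  # "active" or "deleted"


def resolve_value(edges: list[Edge]) -> str | None:
    if not edges:
        return None
    # Stable sorts applied least-significant key first reproduce the ORDER BY:
    ordered = sorted(edges, key=lambda e: e.status)                        # status ASC
    ordered = sorted(ordered, key=lambda e: e.from_, reverse=True)         # r.from DESC
    ordered = sorted(ordered, key=lambda e: e.branch_level, reverse=True)  # branch_level DESC
    best = ordered[0]                                                      # LIMIT 1
    return best.value if best.status == "active" else None                # WHERE is_active


edges = [
    Edge("old", branch_level=1, from_="2025-01-01", status="active"),
    Edge("new", branch_level=2, from_="2025-02-01", status="active"),
]
assert resolve_value(edges) == "new"
```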
infrahub/proposed_change/tasks.py CHANGED
@@ -8,9 +8,10 @@ from pathlib import Path
 from typing import TYPE_CHECKING
 
 import pytest
-from infrahub_sdk.exceptions import ModuleImportError
+from infrahub_sdk.exceptions import ModuleImportError, NodeNotFoundError, URLNotFoundError
 from infrahub_sdk.node import InfrahubNode
 from infrahub_sdk.protocols import (
+    CoreArtifactDefinition,
     CoreArtifactValidator,
     CoreGeneratorDefinition,
     CoreGeneratorValidator,
@@ -58,6 +59,9 @@ from infrahub.generators.models import ProposedChangeGeneratorDefinition
 from infrahub.git.base import extract_repo_file_information
 from infrahub.git.models import TriggerRepositoryInternalChecks, TriggerRepositoryUserChecks
 from infrahub.git.repository import InfrahubRepository, get_initialized_repo
+from infrahub.git.utils import fetch_artifact_definition_targets, fetch_proposed_change_generator_definition_targets
+from infrahub.graphql.analyzer import InfrahubGraphQLQueryAnalyzer
+from infrahub.graphql.initialization import prepare_graphql_params
 from infrahub.log import get_logger
 from infrahub.message_bus.types import (
     ProposedChangeArtifactDefinition,
@@ -528,7 +532,11 @@ async def run_proposed_change_user_tests(model: RequestProposedChangeUserTests)
     log = get_run_logger()
     client = get_client()
 
-    proposed_change = await client.get(kind=InfrahubKind.PROPOSEDCHANGE, id=model.proposed_change)
+    try:
+        proposed_change = await client.get(kind=CoreProposedChange, id=model.proposed_change)
+    except NodeNotFoundError:
+        log.warning(f"Proposed change ({model.proposed_change}) not found, skipping user tests execution")
+        return
 
     def _execute(
         directory: Path, repository: ProposedChangeRepository, proposed_change: InfrahubNode
@@ -616,7 +624,7 @@ async def validate_artifacts_generation(model: RequestArtifactDefinitionCheck, c
     client = get_client()
 
     artifact_definition = await client.get(
-        kind=InfrahubKind.ARTIFACTDEFINITION,
+        kind=CoreArtifactDefinition,
         id=model.artifact_definition.definition_id,
         branch=model.source_branch,
     )
@@ -656,9 +664,9 @@ async def validate_artifacts_generation(model: RequestArtifactDefinitionCheck, c
         branch=model.source_branch,
     )
 
-    await artifact_definition.targets.fetch()
-    group = artifact_definition.targets.peer
-    await group.members.fetch()
+    group = await fetch_artifact_definition_targets(
+        client=client, branch=model.source_branch, definition=artifact_definition
+    )
 
     artifacts_by_member = {}
     for artifact in existing_artifacts:
@@ -667,6 +675,27 @@ async def validate_artifacts_generation(model: RequestArtifactDefinitionCheck, c
     repository = model.branch_diff.get_repository(repository_id=model.artifact_definition.repository_id)
     impacted_artifacts = model.branch_diff.get_subscribers_ids(kind=InfrahubKind.ARTIFACT)
 
+    source_schema_branch = registry.schema.get_schema_branch(name=model.source_branch)
+    source_branch = registry.get_branch_from_registry(branch=model.source_branch)
+
+    graphql_params = await prepare_graphql_params(db=await get_database(), branch=model.source_branch)
+    query_analyzer = InfrahubGraphQLQueryAnalyzer(
+        query=model.artifact_definition.query_payload,
+        branch=source_branch,
+        schema_branch=source_schema_branch,
+        schema=graphql_params.schema,
+    )
+
+    only_has_unique_targets = query_analyzer.query_report.only_has_unique_targets
+    if not only_has_unique_targets:
+        log.warning(
+            f"Artifact definition {artifact_definition.name.value} query does not guarantee unique targets. All targets will be processed."
+        )
+
+    managed_branch = model.source_branch_sync_with_git and model.branch_diff.has_file_modifications
+    if managed_branch:
+        log.info("Source branch is synced with Git repositories with updates, all artifacts will be processed")
+
     checks = []
 
     for relationship in group.members.peers:
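The guard added here asks the query analyzer whether the artifact definition's GraphQL query could ever return the same target twice; only when uniqueness is guaranteed is it safe to skip unchanged artifacts. As an interpretation of what `only_has_unique_targets` protects against (the real property appears to be derived from the query document and schema, not by executing the query), think of a duplicate check over target IDs:

```python
# Illustrative only: if a query can yield the same target twice, skipping
# "unchanged" artifacts risks dropping one of the duplicate occurrences.
def targets_are_unique(target_ids: list[str]) -> bool:
    return len(target_ids) == len(set(target_ids))


assert targets_are_unique(["device-1", "device-2"])
assert not targets_are_unique(["device-1", "device-1"])
```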
@@ -674,8 +703,9 @@ async def validate_artifacts_generation(model: RequestArtifactDefinitionCheck, c
         artifact_id = artifacts_by_member.get(member.id)
         if _should_render_artifact(
             artifact_id=artifact_id,
-            managed_branch=model.source_branch_sync_with_git,
+            managed_branch=managed_branch,
             impacted_artifacts=impacted_artifacts,
+            only_has_unique_targets=only_has_unique_targets,
         ):
             log.info(f"Trigger Artifact processing for {member.display_label}")
 
@@ -695,6 +725,7 @@ async def validate_artifacts_generation(model: RequestArtifactDefinitionCheck, c
                 repository_kind=repository.kind,
                 branch_name=model.source_branch,
                 query=model.artifact_definition.query_name,
+                query_id=model.artifact_definition.query_id,
                 variables=await member.extract(params=artifact_definition.parameters.value),
                 target_id=member.id,
                 target_kind=member.get_kind(),
@@ -720,21 +751,26 @@ async def validate_artifacts_generation(model: RequestArtifactDefinitionCheck, c
     )
 
 
-def _should_render_artifact(artifact_id: str | None, managed_branch: bool, impacted_artifacts: list[str]) -> bool:  # noqa: ARG001
+def _should_render_artifact(
+    artifact_id: str | None,
+    managed_branch: bool,
+    impacted_artifacts: list[str],
+    only_has_unique_targets: bool,
+) -> bool:
     """Returns a boolean to indicate if an artifact should be generated or not.
     Will return true if:
     * The artifact_id wasn't set which could be that it's a new object that doesn't have a previous artifact
-    * The source brance is not data only which would indicate that it could contain updates in git to the transform
+    * The source branch is not data only which would indicate that it could contain updates in git to the transform
     * The artifact_id exists in the impacted_artifacts list
+    * The query failes the only_has_unique_targets check
     Will return false if:
     * The source branch is a data only branch and the artifact_id exists and is not in the impacted list
     """
 
-    # if not artifact_id or managed_branch:
-    #     return True
-    # return artifact_id in impacted_artifacts
-    # Temporary workaround tracked in https://github.com/opsmill/infrahub/issues/4991
-    return True
+    if not only_has_unique_targets or not artifact_id or managed_branch:
+        return True
+
+    return artifact_id in impacted_artifacts
 
 
 @flow(
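With the workaround for issue #4991 gone, the decision function is small enough to exercise directly; the cases below restate its docstring as executable checks:

```python
def _should_render_artifact(
    artifact_id: str | None,
    managed_branch: bool,
    impacted_artifacts: list[str],
    only_has_unique_targets: bool,
) -> bool:
    if not only_has_unique_targets or not artifact_id or managed_branch:
        return True
    return artifact_id in impacted_artifacts


# A brand-new target with no previous artifact: always render.
assert _should_render_artifact(None, False, [], True)
# Branch synced with Git and carrying file modifications: render everything.
assert _should_render_artifact("a1", True, [], True)
# Query may yield duplicate targets: render everything to stay safe.
assert _should_render_artifact("a1", False, [], False)
# Data-only branch, unique targets, artifact not in the impacted list: skip it.
assert not _should_render_artifact("a1", False, ["a2"], True)
```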
@@ -925,14 +961,9 @@ async def request_generator_definition_check(model: RequestGeneratorDefinitionCh
         branch=model.source_branch,
     )
 
-    group = await client.get(
-        kind=InfrahubKind.GENERICGROUP,
-        prefetch_relationships=True,
-        populate_store=True,
-        id=model.generator_definition.group_id,
-        branch=model.source_branch,
+    group = await fetch_proposed_change_generator_definition_targets(
+        client=client, branch=model.source_branch, definition=model.generator_definition
     )
-    await group.members.fetch()
 
     instance_by_member = {}
     for instance in existing_instances:
@@ -1254,12 +1285,16 @@ query GatherArtifactDefinitions {
                 }
                 query {
                     node {
+                        id
                         models {
                             value
                         }
                         name {
                             value
                         }
+                        query {
+                            value
+                        }
                     }
                 }
                 ... on CoreTransformJinja2 {
@@ -1475,7 +1510,9 @@ def _parse_artifact_definitions(definitions: list[dict]) -> list[ProposedChangeA
             content_type=definition["node"]["content_type"]["value"],
             timeout=definition["node"]["transformation"]["node"]["timeout"]["value"],
             query_name=definition["node"]["transformation"]["node"]["query"]["node"]["name"]["value"],
+            query_id=definition["node"]["transformation"]["node"]["query"]["node"]["id"],
             query_models=definition["node"]["transformation"]["node"]["query"]["node"]["models"]["value"] or [],
+            query_payload=definition["node"]["transformation"]["node"]["query"]["node"]["query"]["value"],
             repository_id=definition["node"]["transformation"]["node"]["repository"]["node"]["id"],
             transform_kind=definition["node"]["transformation"]["node"]["__typename"],
         )
@@ -1499,8 +1536,14 @@ async def _get_proposed_change_repositories(
     destination_all = await client.execute_graphql(
         query=DESTINATION_ALLREPOSITORIES, branch_name=model.destination_branch
     )
-    source_managed = await client.execute_graphql(query=SOURCE_REPOSITORIES, branch_name=model.source_branch)
-    source_readonly = await client.execute_graphql(query=SOURCE_READONLY_REPOSITORIES, branch_name=model.source_branch)
+    try:
+        source_managed = await client.execute_graphql(query=SOURCE_REPOSITORIES, branch_name=model.source_branch)
+        source_readonly = await client.execute_graphql(
+            query=SOURCE_READONLY_REPOSITORIES, branch_name=model.source_branch
+        )
+    except URLNotFoundError:
+        # If the URL is not found it means that the source branch has been deleted after the proposed change was created
+        return []
 
     destination_all = destination_all[InfrahubKind.GENERICREPOSITORY]["edges"]
     source_all = (
infrahub/server.py CHANGED
@@ -24,6 +24,7 @@ from infrahub.api.exception_handlers import generic_api_exception_handler
 from infrahub.components import ComponentType
 from infrahub.constants.environment import INSTALLATION_TYPE
 from infrahub.core.initialization import initialization
+from infrahub.database.graph import validate_graph_version
 from infrahub.dependencies.registry import build_component_registry
 from infrahub.exceptions import Error, ValidationError
 from infrahub.graphql.api.endpoints import router as graphql_router
@@ -83,10 +84,13 @@ async def app_initialization(application: FastAPI, enable_scheduler: bool = True
     initialize_lock(service=service)
     # We must initialize DB after initialize lock and initialize lock depends on cache initialization
     async with application.state.db.start_session() as db:
-        await initialization(db=db, add_database_indexes=True)
+        is_initial_setup = await initialization(db=db, add_database_indexes=True)
+
+    async with database.start_session() as dbs:
+        await validate_graph_version(db=dbs)
 
     # Initialize the workflow after the registry has been setup
-    await service.initialize_workflow()
+    await service.initialize_workflow(is_initial_setup=is_initial_setup)
 
     application.state.service = service
     application.state.response_delay = config.SETTINGS.miscellaneous.response_delay
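Startup now threads an `is_initial_setup` flag from `initialization()` through to the workflow layer, and validates the graph version in a fresh session before any workflow setup runs. A condensed sketch of the ordering, using stub functions in place of the real implementations imported above:

```python
import asyncio


# Stubs standing in for infrahub.core.initialization.initialization and
# infrahub.database.graph.validate_graph_version; only the call order and the
# returned flag reflect the diff, the bodies are placeholders.
async def initialization() -> bool:
    return True  # True only on the very first boot against an empty database


async def validate_graph_version() -> None:
    print("graph version OK")


async def initialize_workflow(is_initial_setup: bool) -> None:
    if is_initial_setup:
        print("one-time setup of display-label/HFID trigger identifiers")


async def app_initialization() -> None:
    is_initial_setup = await initialization()    # 1. seed/load the registry
    await validate_graph_version()               # 2. refuse to run against a stale graph
    await initialize_workflow(is_initial_setup)  # 3. first-boot trigger setup


asyncio.run(app_initialization())
```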
infrahub/services/__init__.py CHANGED
@@ -112,7 +112,7 @@ class InfrahubServices:
 
         return service
 
-    async def initialize_workflow(self) -> None:
+    async def initialize_workflow(self, is_initial_setup: bool = False) -> None:
         if self.workflow is not None and isinstance(self.workflow, WorkflowWorkerExecution):
             assert self.component is not None
             # Ideally `WorkflowWorkerExecution.initialize` would be directly part of WorkflowWorkerExecution
@@ -120,7 +120,7 @@ class InfrahubServices:
             # after workflow instantiation.
             await self.component.refresh_heartbeat()
             is_primary = await self.component.is_primary_gunicorn_worker()
-            await self.workflow.initialize(component_is_primary_server=is_primary)
+            await self.workflow.initialize(component_is_primary_server=is_primary, is_initial_setup=is_initial_setup)
 
     @property
     def component(self) -> InfrahubComponent:
infrahub/services/adapters/http/__init__.py CHANGED
@@ -3,10 +3,15 @@ from __future__ import annotations
 from typing import TYPE_CHECKING, Any
 
 if TYPE_CHECKING:
+    import ssl
+
     import httpx
 
 
 class InfrahubHTTP:
+    def verify_tls(self, verify: bool | None = None) -> bool | ssl.SSLContext:
+        raise NotImplementedError()
+
     async def get(
         self,
         url: str,
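`verify_tls()` gives every HTTP consumer one answer to "how should TLS be verified": either a plain boolean or a preconfigured `ssl.SSLContext`. A minimal concrete adapter along those lines (a sketch, not the shipped `HttpxAdapter`):

```python
import ssl
from functools import cached_property


class CachedTLSAdapter:
    """Builds the SSLContext once and hands the same object to every caller."""

    def __init__(self, ca_file: str | None = None, insecure: bool = False) -> None:
        self._ca_file = ca_file
        self._insecure = insecure

    @cached_property
    def _context(self) -> ssl.SSLContext:
        return ssl.create_default_context(cafile=self._ca_file)

    def verify_tls(self, verify: bool | None = None) -> bool | ssl.SSLContext:
        if verify is not None:
            return verify        # explicit per-call override
        if self._insecure:
            return False         # disable verification entirely
        return self._context     # shared, cached context


# e.g. httpx.AsyncClient(verify=adapter.verify_tls())
adapter = CachedTLSAdapter()
assert isinstance(adapter.verify_tls(), ssl.SSLContext)
assert adapter.verify_tls(verify=False) is False
```

Caching matters here because building an `SSLContext` per request is expensive, and sharing one context lets httpx reuse it across clients.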
infrahub/services/adapters/workflow/worker.py CHANGED
@@ -3,10 +3,12 @@ from __future__ import annotations
 from typing import TYPE_CHECKING, Any, overload
 
 from prefect.client.schemas.objects import StateType
+from prefect.context import AsyncClientContext
 from prefect.deployments import run_deployment
 
+from infrahub.services.adapters.http.httpx import HttpxAdapter
 from infrahub.workers.utils import inject_context_parameter
-from infrahub.workflows.initialization import setup_task_manager
+from infrahub.workflows.initialization import setup_task_manager, setup_task_manager_identifiers
 from infrahub.workflows.models import WorkflowInfo
 
 from . import InfrahubWorkflow, Return
@@ -19,11 +21,19 @@ if TYPE_CHECKING:
 
 
 class WorkflowWorkerExecution(InfrahubWorkflow):
+    # This is required to grab a cached SSLContext from the HttpAdapter.
+    # We cannot use the get_http() dependency since it introduces a circular dependency.
+    # We could remove this later on by introducing a cached SSLContext outside of this adapter.
+    _http_adapter = HttpxAdapter()
+
     @staticmethod
-    async def initialize(component_is_primary_server: bool) -> None:
+    async def initialize(component_is_primary_server: bool, is_initial_setup: bool = False) -> None:
         if component_is_primary_server:
             await setup_task_manager()
 
+        if is_initial_setup:
+            await setup_task_manager_identifiers()
+
     @overload
     async def execute_workflow(
         self,
@@ -79,5 +89,6 @@ class WorkflowWorkerExecution(InfrahubWorkflow):
         parameters = dict(parameters) if parameters is not None else {}
         inject_context_parameter(func=flow_func, parameters=parameters, context=context)
 
-        flow_run = await run_deployment(name=workflow.full_name, timeout=0, parameters=parameters or {}, tags=tags)  # type: ignore[return-value, misc]
+        async with AsyncClientContext(httpx_settings={"verify": self._http_adapter.verify_tls()}):
+            flow_run = await run_deployment(name=workflow.full_name, timeout=0, parameters=parameters or {}, tags=tags)  # type: ignore[return-value, misc]
         return WorkflowInfo.from_flow(flow_run=flow_run)
infrahub/task_manager/event.py CHANGED
@@ -160,6 +160,9 @@ class PrefectEventData(PrefectEventModel):
     def _return_branch_rebased(self) -> dict[str, Any]:
         return {"rebased_branch": self._get_branch_name_from_resource()}
 
+    def _return_branch_migrated(self) -> dict[str, Any]:
+        return {"migrated_branch": self._get_branch_name_from_resource()}
+
     def _return_group_event(self) -> dict[str, Any]:
         members = []
         ancestors = []
@@ -228,6 +231,8 @@ class PrefectEventData(PrefectEventModel):
             case "infrahub.branch.deleted":
                 event_specifics = self._return_branch_deleted()
             case "infrahub.branch.merged":
                 event_specifics = self._return_branch_merged()
+            case "infrahub.branch.migrated":
+                event_specifics = self._return_branch_migrated()
             case "infrahub.branch.rebased":
                 event_specifics = self._return_branch_rebased()
             case "infrahub.group.member_added" | "infrahub.group.member_removed":
infrahub/task_manager/models.py CHANGED
@@ -141,6 +141,13 @@ class InfrahubEventFilter(EventFilter):
             if branches:
                 self.resource = EventResourceFilter(labels=ResourceSpecification({"infrahub.branch.name": branches}))
 
+        if branch_migrated := event_type_filter.get("branch_migrated"):
+            branches = branch_migrated.get("branches") or []
+            if "infrahub.branch.created" not in event_type:
+                event_type.append("infrahub.branch.migrated")
+            if branches:
+                self.resource = EventResourceFilter(labels=ResourceSpecification({"infrahub.branch.name": branches}))
+
         if branch_rebased := event_type_filter.get("branch_rebased"):
             branches = branch_rebased.get("branches") or []
             if "infrahub.branch.created" not in event_type:
infrahub/task_manager/task.py CHANGED
@@ -1,7 +1,10 @@
+import asyncio
 import uuid
+from datetime import datetime, timedelta, timezone
 from typing import Any
 from uuid import UUID
 
+from prefect import State
 from prefect.client.orchestration import PrefectClient, get_client
 from prefect.client.schemas.filters import (
     ArtifactFilter,
@@ -12,6 +15,7 @@ from prefect.client.schemas.filters import (
     FlowRunFilter,
     FlowRunFilterId,
     FlowRunFilterName,
+    FlowRunFilterStartTime,
     FlowRunFilterState,
     FlowRunFilterStateType,
     FlowRunFilterTags,
@@ -311,3 +315,72 @@ class PrefectTask:
         )
 
         return {"count": count or 0, "edges": nodes}
+
+    @classmethod
+    async def delete_flow_runs(
+        cls,
+        states: list[StateType] = [StateType.COMPLETED, StateType.FAILED, StateType.CANCELLED],  # noqa: B006
+        delete: bool = True,
+        days_to_keep: int = 2,
+        batch_size: int = 100,
+    ) -> None:
+        """Delete flow runs in the specified states and older than specified days."""
+
+        logger = get_logger()
+
+        async with get_client(sync_client=False) as client:
+            cutoff = datetime.now(timezone.utc) - timedelta(days=days_to_keep)
+
+            flow_run_filter = FlowRunFilter(
+                start_time=FlowRunFilterStartTime(before_=cutoff),  # type: ignore[arg-type]
+                state=FlowRunFilterState(type=FlowRunFilterStateType(any_=states)),
+            )
+
+            # Get flow runs to delete
+            flow_runs = await client.read_flow_runs(flow_run_filter=flow_run_filter, limit=batch_size)
+
+            deleted_total = 0
+
+            while True:
+                batch_deleted = 0
+                failed_deletes = []
+
+                # Delete each flow run through the API
+                for flow_run in flow_runs:
+                    try:
+                        if delete:
+                            await client.delete_flow_run(flow_run_id=flow_run.id)
+                        else:
+                            await client.set_flow_run_state(
+                                flow_run_id=flow_run.id,
+                                state=State(type=StateType.CRASHED),
+                                force=True,
+                            )
+                        deleted_total += 1
+                        batch_deleted += 1
+                    except Exception as e:
+                        logger.warning(f"Failed to delete flow run {flow_run.id}: {e}")
+                        failed_deletes.append(flow_run.id)
+
+                    # Rate limiting
+                    if batch_deleted % 10 == 0:
+                        await asyncio.sleep(0.5)
+
+                logger.info(f"Delete {batch_deleted}/{len(flow_runs)} flow runs (total: {deleted_total})")
+
+                # Get next batch
+                previous_flow_run_ids = [fr.id for fr in flow_runs]
+                flow_runs = await client.read_flow_runs(flow_run_filter=flow_run_filter, limit=batch_size)
+
+                if not flow_runs:
+                    logger.info("No more flow runs to delete")
+                    break
+
+                if previous_flow_run_ids == [fr.id for fr in flow_runs]:
+                    logger.info("Found same flow runs to delete, aborting")
+                    break
+
+                # Delay between batches to avoid overwhelming the API
+                await asyncio.sleep(1.0)
+
+        logger.info(f"Retention complete. Total deleted tasks: {deleted_total}")
infrahub/trigger/setup.py CHANGED
@@ -6,6 +6,7 @@ from prefect.cache_policies import NONE
 from prefect.client.orchestration import PrefectClient, get_client
 from prefect.client.schemas.filters import DeploymentFilter, DeploymentFilterName
 from prefect.events.schemas.automations import Automation
+from prefect.exceptions import PrefectHTTPStatusError
 
 from infrahub import lock
 from infrahub.database import InfrahubDatabase
@@ -51,7 +52,7 @@ async def setup_triggers_specific(
     )  # type: ignore[misc]
 
 
-@task(name="trigger-setup", task_run_name="Setup triggers", cache_policy=NONE)  # type: ignore[arg-type]
+@task(name="trigger-setup", task_run_name="Setup triggers", cache_policy=NONE)
 async def setup_triggers(
     client: PrefectClient,
     triggers: list[TriggerDefinition],
@@ -83,7 +84,9 @@ async def setup_triggers(
     existing_automations: dict[str, Automation] = {}
     if trigger_type:
         existing_automations = {
-            item.name: item for item in await client.read_automations() if item.name.startswith(trigger_type.value)
+            item.name: item
+            for item in await client.read_automations()
+            if item.name.startswith(f"{trigger_type.value}::")
         }
     else:
         existing_automations = {item.name: item for item in await client.read_automations()}
@@ -133,8 +136,14 @@ async def setup_triggers(
             continue
 
         report.deleted.append(existing_automation)
-        await client.delete_automation(automation_id=existing_automation.id)
-        log.info(f"{item_to_delete} Deleted")
+        try:
+            await client.delete_automation(automation_id=existing_automation.id)
+            log.info(f"{item_to_delete} Deleted")
+        except PrefectHTTPStatusError as exc:
+            if exc.response.status_code == 404:
+                log.info(f"{item_to_delete} was already deleted")
+            else:
+                raise
 
     if trigger_type:
         log.info(
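The `startswith` fix deserves a second look: matching on the bare prefix would also capture automations of a different trigger type whose name merely begins the same way, and those would then be wrongly considered for deletion. A small repro of the over-match (the trigger names are illustrative, not actual `TriggerType` values):

```python
automations = ["webhook::standard::a", "webhook_custom::b"]

loose = [name for name in automations if name.startswith("webhook")]
strict = [name for name in automations if name.startswith("webhook::")]

assert loose == ["webhook::standard::a", "webhook_custom::b"]  # over-matches
assert strict == ["webhook::standard::a"]                      # scoped to the type
```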
infrahub/trigger/tasks.py CHANGED
@@ -7,6 +7,7 @@ from infrahub.computed_attribute.gather import (
     gather_trigger_computed_attribute_python,
 )
 from infrahub.display_labels.gather import gather_trigger_display_labels_jinja2
+from infrahub.hfid.gather import gather_trigger_hfid
 from infrahub.trigger.catalogue import builtin_triggers
 from infrahub.webhook.gather import gather_trigger_webhook
 from infrahub.workers.dependencies import get_database
@@ -20,6 +21,7 @@ async def trigger_configure_all() -> None:
     async with database.start_session() as db:
         webhook_trigger = await gather_trigger_webhook(db=db)
         display_label_triggers = await gather_trigger_display_labels_jinja2()
+        human_friendly_id_triggers = await gather_trigger_hfid()
         computed_attribute_j2_triggers = await gather_trigger_computed_attribute_jinja2()
         (
             computed_attribute_python_triggers,
@@ -31,6 +33,7 @@ async def trigger_configure_all() -> None:
             + computed_attribute_python_triggers
             + computed_attribute_python_query_triggers
             + display_label_triggers
+            + human_friendly_id_triggers
             + builtin_triggers
             + webhook_trigger
             + action_rules
infrahub/workers/dependencies.py CHANGED
@@ -7,6 +7,7 @@ from infrahub_sdk.config import Config
 from infrahub import config
 from infrahub.components import ComponentType
 from infrahub.constants.environment import INSTALLATION_TYPE
+from infrahub.core.registry import registry
 from infrahub.database import InfrahubDatabase, get_db
 from infrahub.services.adapters.cache import InfrahubCache
 from infrahub.services.adapters.event import InfrahubEventService
@@ -34,7 +35,15 @@ def get_component_type() -> ComponentType:
 
 
 def build_client() -> InfrahubClient:
-    return InfrahubClient(config=Config(address=config.SETTINGS.main.internal_address, retry_on_failure=True))
+    client_config = Config(address=config.SETTINGS.main.internal_address, retry_on_failure=True)
+    client_config.set_ssl_context(context=get_http().verify_tls())
+    client = InfrahubClient(config=client_config)
+    # Populate client schema cache using our internal schema cache
+    if registry.schema:
+        for branch in registry.schema.get_branches():
+            client.schema.set_cache(schema=registry.schema.get_sdk_schema_branch(name=branch), branch=branch)
+
+    return client
 
 
 @inject
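Priming the SDK client's schema cache from the server's own in-memory registry lets internal workers skip an HTTP schema fetch per branch. A toy version of the pattern (class and method names here are illustrative; only the `set_cache(schema=..., branch=...)` shape mirrors the SDK call used in `build_client()` above):

```python
# Toy "prime the client cache from a local registry" pattern.
class SchemaCache:
    def __init__(self) -> None:
        self._cache: dict[str, dict] = {}

    def set_cache(self, schema: dict, branch: str) -> None:
        self._cache[branch] = schema

    def get(self, branch: str) -> dict | None:
        return self._cache.get(branch)  # cache hit: no HTTP round-trip


local_registry = {"main": {"nodes": []}, "feature-1": {"nodes": []}}

cache = SchemaCache()
for branch, schema in local_registry.items():
    cache.set_cache(schema=schema, branch=branch)

assert cache.get("main") is not None  # subsequent lookups stay local
```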
infrahub/workers/infrahub_async.py CHANGED
@@ -8,6 +8,7 @@ from infrahub_sdk import Config, InfrahubClient
 from infrahub_sdk.exceptions import Error as SdkError
 from prefect import settings as prefect_settings
 from prefect.client.schemas.objects import FlowRun
+from prefect.context import AsyncClientContext
 from prefect.flow_engine import run_flow_async
 from prefect.logging.handlers import APILogHandler
 from prefect.workers.base import BaseJobConfiguration, BaseVariables, BaseWorker, BaseWorkerResult
@@ -18,6 +19,7 @@ from infrahub import config
 from infrahub.components import ComponentType
 from infrahub.core import registry
 from infrahub.core.initialization import initialization
+from infrahub.database.graph import validate_graph_version
 from infrahub.dependencies.registry import build_component_registry
 from infrahub.git import initialize_repositories_directory
 from infrahub.lock import initialize_lock
@@ -27,6 +29,7 @@ from infrahub.workers.dependencies import (
     get_cache,
     get_component,
     get_database,
+    get_http,
     get_message_bus,
     get_workflow,
     set_component_type,
@@ -129,6 +132,9 @@ class InfrahubWorkerAsync(BaseWorker):
 
         await self.service.component.refresh_schema_hash()
 
+        async with self.service.database.start_session() as dbs:
+            await validate_graph_version(db=dbs)
+
         initialize_repositories_directory()
         build_component_registry()
         await self.service.scheduler.start_schedule()
@@ -138,7 +144,7 @@ class InfrahubWorkerAsync(BaseWorker):
         self,
         flow_run: FlowRun,
         configuration: BaseJobConfiguration,
-        task_status: TaskStatus | None = None,
+        task_status: TaskStatus[int] | None = None,
     ) -> BaseWorkerResult:
         flow_run_logger = self.get_flow_run_logger(flow_run)
 
@@ -154,7 +160,9 @@ class InfrahubWorkerAsync(BaseWorker):
         if task_status:
             task_status.started(True)
 
-        await run_flow_async(flow=flow_func, flow_run=flow_run, parameters=params, return_type="state")
+        async with AsyncClientContext(httpx_settings={"verify": get_http().verify_tls()}) as ctx:
+            ctx._httpx_settings = None  # Hack to make all child task/flow runs use the same client
+            await run_flow_async(flow=flow_func, flow_run=flow_run, parameters=params, return_type="state")
 
         return InfrahubWorkerAsyncResult(status_code=0, identifier=str(flow_run.id))
infrahub/workflows/catalogue.py CHANGED
@@ -230,6 +230,13 @@ BRANCH_REBASE = WorkflowDefinition(
     function="rebase_branch",
     tags=[WorkflowTag.DATABASE_CHANGE],
 )
+BRANCH_MIGRATE = WorkflowDefinition(
+    name="branch-migrate",
+    type=WorkflowType.CORE,
+    module="infrahub.core.branch.tasks",
+    function="migrate_branch",
+    tags=[WorkflowTag.DATABASE_CHANGE],
+)
 
 BRANCH_CREATE = WorkflowDefinition(
     name="create-branch",
@@ -641,6 +648,7 @@ WORKFLOWS = [
     BRANCH_MERGED,
     BRANCH_MERGE_MUTATION,
     BRANCH_MERGE_POST_PROCESS,
+    BRANCH_MIGRATE,
     BRANCH_REBASE,
     BRANCH_VALIDATE,
     CLEAN_UP_DEADLOCKS,
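Registering the branch migration flow follows the catalogue's existing pattern: declare a `WorkflowDefinition` pointing at a module/function pair, then append it to `WORKFLOWS` so deployment setup picks it up. A stripped-down model of that pattern (a stand-in, not the real `infrahub.workflows.models.WorkflowDefinition`):

```python
from dataclasses import dataclass, field


@dataclass
class WorkflowDefinition:  # stand-in; the shipped class carries more fields
    name: str
    module: str
    function: str
    tags: list[str] = field(default_factory=list)

    @property
    def entrypoint(self) -> str:
        return f"{self.module}:{self.function}"


BRANCH_MIGRATE = WorkflowDefinition(
    name="branch-migrate",
    module="infrahub.core.branch.tasks",
    function="migrate_branch",
    tags=["database-change"],
)

WORKFLOWS = [BRANCH_MIGRATE]  # deployment setup iterates this list
assert BRANCH_MIGRATE.entrypoint == "infrahub.core.branch.tasks:migrate_branch"
```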
infrahub/workflows/initialization.py CHANGED
@@ -76,6 +76,11 @@ async def setup_task_manager() -> None:
         await setup_triggers(
             client=client, triggers=builtin_triggers, trigger_type=TriggerType.BUILTIN, force_update=True
         )
+
+
+@flow(name="task-manager-identifiers", flow_run_name="Setup Task Manager Display Labels and HFID")
+async def setup_task_manager_identifiers() -> None:
+    async with get_client(sync_client=False) as client:
         display_label_triggers = await gather_trigger_display_labels_jinja2()
         await setup_triggers(
             client=client,
infrahub/workflows/utils.py CHANGED
@@ -9,6 +9,7 @@ from prefect.runtime import flow_run
 from infrahub.core.constants import GLOBAL_BRANCH_NAME
 from infrahub.core.registry import registry
 from infrahub.tasks.registry import refresh_branches
+from infrahub.workers.dependencies import get_http
 
 from .constants import TAG_NAMESPACE, WorkflowTag
 
@@ -26,7 +27,7 @@ async def add_tags(
     namespace: bool = True,
     db_change: bool = False,
 ) -> None:
-    client = get_client(sync_client=False)
+    client = get_client(httpx_settings={"verify": get_http().verify_tls()}, sync_client=False)
     current_flow_run_id = flow_run.id
     current_tags: list[str] = flow_run.tags
     branch_tags = (
infrahub_sdk/analyzer.py CHANGED
@@ -30,7 +30,7 @@ class GraphQLOperation(BaseModel):
 
 
 class GraphQLQueryAnalyzer:
-    def __init__(self, query: str, schema: GraphQLSchema | None = None):
+    def __init__(self, query: str, schema: GraphQLSchema | None = None) -> None:
         self.query: str = query
         self.schema: GraphQLSchema | None = schema
         self.document: DocumentNode = parse(self.query)