infrahub-server 1.7.0rc0__py3-none-any.whl → 1.7.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (124)
  1. infrahub/actions/gather.py +2 -2
  2. infrahub/api/query.py +3 -2
  3. infrahub/api/schema.py +5 -0
  4. infrahub/api/transformation.py +3 -3
  5. infrahub/cli/db.py +6 -2
  6. infrahub/computed_attribute/gather.py +2 -0
  7. infrahub/config.py +2 -2
  8. infrahub/core/attribute.py +21 -2
  9. infrahub/core/branch/models.py +11 -117
  10. infrahub/core/branch/tasks.py +7 -3
  11. infrahub/core/diff/merger/merger.py +5 -1
  12. infrahub/core/diff/model/path.py +43 -0
  13. infrahub/core/graph/__init__.py +1 -1
  14. infrahub/core/graph/index.py +2 -0
  15. infrahub/core/initialization.py +2 -1
  16. infrahub/core/ipam/resource_allocator.py +229 -0
  17. infrahub/core/migrations/graph/__init__.py +10 -0
  18. infrahub/core/migrations/graph/m014_remove_index_attr_value.py +3 -2
  19. infrahub/core/migrations/graph/m015_diff_format_update.py +3 -2
  20. infrahub/core/migrations/graph/m016_diff_delete_bug_fix.py +3 -2
  21. infrahub/core/migrations/graph/m017_add_core_profile.py +6 -4
  22. infrahub/core/migrations/graph/m018_uniqueness_nulls.py +3 -4
  23. infrahub/core/migrations/graph/m020_duplicate_edges.py +3 -3
  24. infrahub/core/migrations/graph/m025_uniqueness_nulls.py +3 -4
  25. infrahub/core/migrations/graph/m026_0000_prefix_fix.py +4 -5
  26. infrahub/core/migrations/graph/m028_delete_diffs.py +3 -2
  27. infrahub/core/migrations/graph/m029_duplicates_cleanup.py +3 -2
  28. infrahub/core/migrations/graph/m031_check_number_attributes.py +4 -3
  29. infrahub/core/migrations/graph/m032_cleanup_orphaned_branch_relationships.py +3 -2
  30. infrahub/core/migrations/graph/m034_find_orphaned_schema_fields.py +3 -2
  31. infrahub/core/migrations/graph/m035_orphan_relationships.py +3 -3
  32. infrahub/core/migrations/graph/m036_drop_attr_value_index.py +3 -2
  33. infrahub/core/migrations/graph/m037_index_attr_vals.py +3 -2
  34. infrahub/core/migrations/graph/m038_redo_0000_prefix_fix.py +4 -5
  35. infrahub/core/migrations/graph/m039_ipam_reconcile.py +3 -2
  36. infrahub/core/migrations/graph/m041_deleted_dup_edges.py +3 -2
  37. infrahub/core/migrations/graph/m042_profile_attrs_in_db.py +5 -4
  38. infrahub/core/migrations/graph/m043_create_hfid_display_label_in_db.py +12 -5
  39. infrahub/core/migrations/graph/m044_backfill_hfid_display_label_in_db.py +15 -4
  40. infrahub/core/migrations/graph/m045_backfill_hfid_display_label_in_db_profile_template.py +10 -4
  41. infrahub/core/migrations/graph/m046_fill_agnostic_hfid_display_labels.py +6 -5
  42. infrahub/core/migrations/graph/m047_backfill_or_null_display_label.py +19 -5
  43. infrahub/core/migrations/graph/m048_undelete_rel_props.py +6 -4
  44. infrahub/core/migrations/graph/m049_remove_is_visible_relationship.py +3 -3
  45. infrahub/core/migrations/graph/m050_backfill_vertex_metadata.py +3 -3
  46. infrahub/core/migrations/graph/m051_subtract_branched_from_microsecond.py +39 -0
  47. infrahub/core/migrations/graph/m052_fix_global_branch_level.py +51 -0
  48. infrahub/core/migrations/graph/m053_fix_branch_level_zero.py +61 -0
  49. infrahub/core/migrations/graph/m054_cleanup_orphaned_nodes.py +87 -0
  50. infrahub/core/migrations/graph/m055_remove_webhook_validate_certificates_default.py +86 -0
  51. infrahub/core/migrations/runner.py +6 -3
  52. infrahub/core/migrations/schema/attribute_kind_update.py +8 -11
  53. infrahub/core/migrations/schema/attribute_supports_profile.py +3 -8
  54. infrahub/core/migrations/schema/models.py +8 -0
  55. infrahub/core/migrations/schema/node_attribute_add.py +24 -29
  56. infrahub/core/migrations/schema/tasks.py +7 -1
  57. infrahub/core/migrations/shared.py +37 -30
  58. infrahub/core/node/__init__.py +2 -1
  59. infrahub/core/node/lock_utils.py +23 -2
  60. infrahub/core/node/resource_manager/ip_address_pool.py +5 -11
  61. infrahub/core/node/resource_manager/ip_prefix_pool.py +5 -21
  62. infrahub/core/node/resource_manager/number_pool.py +109 -39
  63. infrahub/core/query/__init__.py +7 -1
  64. infrahub/core/query/branch.py +18 -2
  65. infrahub/core/query/ipam.py +629 -40
  66. infrahub/core/query/node.py +128 -0
  67. infrahub/core/query/resource_manager.py +114 -1
  68. infrahub/core/relationship/model.py +9 -3
  69. infrahub/core/schema/attribute_parameters.py +28 -1
  70. infrahub/core/schema/attribute_schema.py +9 -2
  71. infrahub/core/schema/definitions/core/webhook.py +0 -1
  72. infrahub/core/schema/definitions/internal.py +7 -4
  73. infrahub/core/schema/manager.py +50 -38
  74. infrahub/core/validators/attribute/kind.py +5 -2
  75. infrahub/core/validators/determiner.py +4 -0
  76. infrahub/graphql/analyzer.py +3 -1
  77. infrahub/graphql/app.py +7 -10
  78. infrahub/graphql/execution.py +95 -0
  79. infrahub/graphql/manager.py +8 -2
  80. infrahub/graphql/mutations/proposed_change.py +15 -0
  81. infrahub/graphql/parser.py +10 -7
  82. infrahub/graphql/queries/ipam.py +20 -25
  83. infrahub/graphql/queries/search.py +29 -9
  84. infrahub/lock.py +7 -0
  85. infrahub/proposed_change/tasks.py +2 -0
  86. infrahub/services/adapters/cache/redis.py +7 -0
  87. infrahub/services/adapters/http/httpx.py +27 -0
  88. infrahub/trigger/catalogue.py +2 -0
  89. infrahub/trigger/models.py +73 -4
  90. infrahub/trigger/setup.py +1 -1
  91. infrahub/trigger/system.py +36 -0
  92. infrahub/webhook/models.py +4 -2
  93. infrahub/webhook/tasks.py +2 -2
  94. infrahub/workflows/initialization.py +2 -2
  95. infrahub_sdk/analyzer.py +2 -2
  96. infrahub_sdk/branch.py +12 -39
  97. infrahub_sdk/checks.py +4 -4
  98. infrahub_sdk/client.py +36 -0
  99. infrahub_sdk/ctl/cli_commands.py +2 -1
  100. infrahub_sdk/ctl/graphql.py +15 -4
  101. infrahub_sdk/ctl/utils.py +2 -2
  102. infrahub_sdk/enums.py +6 -0
  103. infrahub_sdk/graphql/renderers.py +21 -0
  104. infrahub_sdk/graphql/utils.py +85 -0
  105. infrahub_sdk/node/attribute.py +12 -2
  106. infrahub_sdk/node/constants.py +11 -0
  107. infrahub_sdk/node/metadata.py +69 -0
  108. infrahub_sdk/node/node.py +65 -14
  109. infrahub_sdk/node/property.py +3 -0
  110. infrahub_sdk/node/related_node.py +24 -1
  111. infrahub_sdk/node/relationship.py +10 -1
  112. infrahub_sdk/operation.py +2 -2
  113. infrahub_sdk/schema/repository.py +1 -2
  114. infrahub_sdk/transforms.py +2 -2
  115. infrahub_sdk/types.py +18 -2
  116. {infrahub_server-1.7.0rc0.dist-info → infrahub_server-1.7.2.dist-info}/METADATA +8 -8
  117. {infrahub_server-1.7.0rc0.dist-info → infrahub_server-1.7.2.dist-info}/RECORD +123 -114
  118. {infrahub_server-1.7.0rc0.dist-info → infrahub_server-1.7.2.dist-info}/entry_points.txt +0 -1
  119. infrahub_testcontainers/docker-compose-cluster.test.yml +16 -10
  120. infrahub_testcontainers/docker-compose.test.yml +11 -10
  121. infrahub_testcontainers/performance_test.py +1 -1
  122. infrahub/pools/address.py +0 -16
  123. {infrahub_server-1.7.0rc0.dist-info → infrahub_server-1.7.2.dist-info}/WHEEL +0 -0
  124. {infrahub_server-1.7.0rc0.dist-info → infrahub_server-1.7.2.dist-info}/licenses/LICENSE.txt +0 -0
infrahub/graphql/queries/ipam.py CHANGED
@@ -7,11 +7,10 @@ from graphene import Field, Int, ObjectType, String
 from netaddr import IPSet
 
 from infrahub.core.constants import InfrahubKind
+from infrahub.core.ipam.resource_allocator import IPAMResourceAllocator
 from infrahub.core.manager import NodeManager
 from infrahub.core.protocols import BuiltinIPPrefix
-from infrahub.core.query.ipam import get_ip_addresses, get_subnets
 from infrahub.exceptions import NodeNotFoundError, ValidationError
-from infrahub.pools.address import get_available
 from infrahub.pools.prefix import get_next_available_prefix
 
 if TYPE_CHECKING:
@@ -48,24 +47,15 @@ class IPAddressGetNextAvailable(ObjectType):
             raise ValidationError(input_value="Invalid prefix length for current selected prefix")
 
         namespace = await prefix.ip_namespace.get_peer(db=graphql_context.db)  # type: ignore[attr-defined]
-        addresses = await get_ip_addresses(
-            db=graphql_context.db,
+        allocator = IPAMResourceAllocator(db=graphql_context.db, namespace=namespace, branch=graphql_context.branch)
+        next_address = await allocator.get_next_address(
             ip_prefix=ip_prefix,
-            namespace=namespace,
-            branch=graphql_context.branch,
-        )
-
-        available = get_available(
-            network=ip_prefix,
-            addresses=[ip.address for ip in addresses],
             is_pool=prefix.is_pool.value,  # type: ignore[attr-defined]
         )
 
-        if not available:
+        if not next_address:
             raise IndexError("No addresses available in prefix")
 
-        next_address = available.iter_cidrs()[0]
-
         return {"address": f"{next_address.ip}/{prefix_length}"}
 
 
@@ -90,22 +80,27 @@ class IPPrefixGetNextAvailable(ObjectType):
                 branch_name=graphql_context.branch.name, node_type=InfrahubKind.IPPREFIX, identifier=prefix_id
             )
 
+        ip_prefix = prefix.prefix.obj
         namespace = await prefix.ip_namespace.get_peer(db=graphql_context.db)
-        subnets = await get_subnets(
-            db=graphql_context.db,
-            ip_prefix=ipaddress.ip_network(prefix.prefix.value),
-            namespace=namespace,
-            branch=graphql_context.branch,
-        )
+        allocator = IPAMResourceAllocator(db=graphql_context.db, namespace=namespace, branch=graphql_context.branch)
 
-        pool = IPSet([prefix.prefix.value])
+        # Build available pool by removing existing subnets from parent prefix
+        subnets = await allocator.get_subnets(ip_prefix=ip_prefix)
+        available_pool = IPSet([str(ip_prefix)])
         for subnet in subnets:
-            pool.remove(addr=str(subnet.prefix))
+            available_pool.remove(str(subnet.prefix))
 
-        prefix_ver = ipaddress.ip_network(prefix.prefix.value).version
-        next_available = get_next_available_prefix(pool=pool, prefix_length=prefix_length, prefix_ver=prefix_ver)
+        if prefix_length is not None and not ip_prefix.prefixlen < prefix_length <= ip_prefix.max_prefixlen:
+            raise ValidationError(input_value="Invalid prefix length for current selected prefix")
+
+        try:
+            next_prefix = get_next_available_prefix(
+                pool=available_pool, prefix_length=prefix_length, prefix_ver=ip_prefix.version
+            )
+        except ValueError as exc:
+            raise IndexError("No prefixes available in prefix") from exc
 
-        return {"prefix": str(next_available)}
+        return {"prefix": str(next_prefix)}
 
 
 InfrahubIPAddressGetNextAvailable = Field(
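
The refactored resolver keeps the same pool arithmetic it always had, now behind IPAMResourceAllocator: subtract every allocated subnet from the parent prefix with netaddr's IPSet, then take the first remaining CIDR that can hold the requested length. A standalone sketch of that approach with illustrative values (not the shipped get_next_available_prefix implementation):

    from netaddr import IPSet

    pool = IPSet(["10.0.0.0/16"])
    for allocated in ("10.0.0.0/24", "10.0.1.0/24"):
        pool.remove(allocated)  # carve out subnets already in use

    # First remaining CIDR wide enough for a /26, then its first /26 subnet
    candidate = next(cidr for cidr in pool.iter_cidrs() if cidr.prefixlen <= 26)
    print(next(candidate.subnet(26)))  # 10.0.2.0/26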
infrahub/graphql/queries/search.py CHANGED
@@ -9,12 +9,12 @@ from infrahub_sdk.utils import is_valid_uuid
 
 from infrahub.core.constants import InfrahubKind
 from infrahub.core.manager import NodeManager
+from infrahub.core.query.node import NodeGetListByAttributeValueQuery
 from infrahub.graphql.field_extractor import extract_graphql_fields
 
 if TYPE_CHECKING:
     from graphql import GraphQLResolveInfo
 
-    from infrahub.core.node import Node as InfrahubNode
     from infrahub.graphql.initialization import GraphqlContext
 
 
@@ -104,10 +104,11 @@ async def search_resolver(
     q: str,
     limit: int = 10,
     partial_match: bool = True,
+    case_sensitive: bool = False,
 ) -> dict[str, Any]:
     graphql_context: GraphqlContext = info.context
     response: dict[str, Any] = {}
-    results: list[InfrahubNode] = []
+    results: list[dict[str, str]] = []
 
     fields = extract_graphql_fields(info=info)
 
@@ -116,25 +117,43 @@
         db=graphql_context.db, branch=graphql_context.branch, at=graphql_context.at, id=q
     )
     if matching:
-        results.append(matching)
+        results.append({"id": matching.id, "kind": matching.get_kind()})
     else:
         with contextlib.suppress(ValueError, ipaddress.AddressValueError):
             # Convert any IPv6 address, network or partial address to collapsed format as it might be stored in db.
            q = _collapse_ipv6(q)
 
-        for kind in [InfrahubKind.NODE, InfrahubKind.GENERICGROUP]:
-            objs = await NodeManager.query(
+        if case_sensitive:
+            # Case-sensitive search using the dedicated query
+            query = await NodeGetListByAttributeValueQuery.init(
                 db=graphql_context.db,
                 branch=graphql_context.branch,
-                schema=kind,
-                filters={"any__value": q},
+                at=graphql_context.at,
+                search_value=q,
+                kinds=[InfrahubKind.NODE, InfrahubKind.GENERICGROUP],
                 limit=limit,
                 partial_match=partial_match,
             )
-            results.extend(objs)
+            await query.execute(db=graphql_context.db)
+
+            for result in query.get_data():
+                results.append({"id": result.uuid, "kind": result.kind})
+        else:
+            # Default: case-insensitive search using NodeManager.query
+            for kind in [InfrahubKind.NODE, InfrahubKind.GENERICGROUP]:
+                objs = await NodeManager.query(
+                    db=graphql_context.db,
+                    branch=graphql_context.branch,
+                    schema=kind,
+                    filters={"any__value": q},
+                    limit=limit,
+                    partial_match=partial_match,
+                )
+                for obj in objs:
+                    results.append({"id": obj.id, "kind": obj.get_kind()})
 
     if "edges" in fields:
-        response["edges"] = [{"node": {"id": obj.id, "kind": obj.get_kind()}} for obj in results]
+        response["edges"] = [{"node": result} for result in results]
 
     if "count" in fields:
         response["count"] = len(results)
@@ -147,6 +166,7 @@ InfrahubSearchAnywhere = Field(
     q=String(required=True),
     limit=Int(required=False),
     partial_match=Boolean(required=False),
+    case_sensitive=Boolean(required=False),
     resolver=search_resolver,
     required=True,
 )
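
The new case_sensitive argument defaults to False, so existing queries keep their previous behavior. A usage sketch against the updated schema, assuming a reachable Infrahub instance:

    from infrahub_sdk import InfrahubClient

    async def search(client: InfrahubClient, term: str) -> list[dict]:
        query = """
        query Search($q: String!) {
            InfrahubSearchAnywhere(q: $q, limit: 10, partial_match: true, case_sensitive: true) {
                count
                edges { node { id kind } }
            }
        }
        """
        response = await client.execute_graphql(query=query, variables={"q": term})
        return [edge["node"] for edge in response["InfrahubSearchAnywhere"]["edges"]]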
infrahub/lock.py CHANGED
@@ -10,6 +10,7 @@ from typing import TYPE_CHECKING
 
 import redis.asyncio as redis
 from prometheus_client import Histogram
+from redis import UsernamePasswordCredentialProvider
 from redis.asyncio.lock import Lock as GlobalLock
 
 from infrahub import config
@@ -275,10 +276,16 @@ class InfrahubLockRegistry:
    ) -> None:
        if config.SETTINGS.cache.enable and not local_only:
            if config.SETTINGS.cache.driver == config.CacheDriver.Redis:
+                credential_provider: UsernamePasswordCredentialProvider | None = None
+                if config.SETTINGS.cache.username and config.SETTINGS.cache.password:
+                    credential_provider = UsernamePasswordCredentialProvider(
+                        username=config.SETTINGS.cache.username, password=config.SETTINGS.cache.password
+                    )
                self.connection = redis.Redis(
                    host=config.SETTINGS.cache.address,
                    port=config.SETTINGS.cache.service_port,
                    db=config.SETTINGS.cache.database,
+                    credential_provider=credential_provider,
                    ssl=config.SETTINGS.cache.tls_enabled,
                    ssl_cert_reqs="optional" if not config.SETTINGS.cache.tls_insecure else "none",
                    ssl_check_hostname=not config.SETTINGS.cache.tls_insecure,
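
UsernamePasswordCredentialProvider (redis-py >= 4.4) hands credentials to the client as a single object; passing None for credential_provider preserves the previous unauthenticated behavior. A minimal sketch of the same pattern outside the Infrahub settings machinery:

    import redis.asyncio as redis
    from redis import UsernamePasswordCredentialProvider

    provider = UsernamePasswordCredentialProvider(username="infrahub", password="s3cr3t")
    connection = redis.Redis(host="localhost", port=6379, db=0, credential_provider=provider)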
infrahub/proposed_change/tasks.py CHANGED
@@ -61,6 +61,7 @@ from infrahub.git.models import TriggerRepositoryInternalChecks, TriggerReposito
 from infrahub.git.repository import InfrahubRepository, get_initialized_repo
 from infrahub.git.utils import fetch_artifact_definition_targets, fetch_proposed_change_generator_definition_targets
 from infrahub.graphql.analyzer import InfrahubGraphQLQueryAnalyzer
+from infrahub.graphql.execution import cached_parse
 from infrahub.graphql.initialization import prepare_graphql_params
 from infrahub.log import get_logger
 from infrahub.message_bus.types import (
@@ -685,6 +686,7 @@ async def validate_artifacts_generation(model: RequestArtifactDefinitionCheck, c
         branch=source_branch,
         schema_branch=source_schema_branch,
         schema=graphql_params.schema,
+        document=cached_parse(model.artifact_definition.query_payload),
     )
 
     only_has_unique_targets = query_analyzer.query_report.only_has_unique_targets
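
infrahub/graphql/execution.py is a new module in this release (+95 lines) and its body is not shown in this diff. Assuming cached_parse memoizes graphql-core's parse() keyed on the query string, a plausible minimal shape would be:

    from functools import lru_cache

    from graphql import DocumentNode, parse


    @lru_cache(maxsize=128)
    def cached_parse(query: str) -> DocumentNode:
        # Hypothetical sketch: reuse the parsed document for repeated queries
        return parse(query)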
infrahub/services/adapters/cache/redis.py CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
 from typing import TYPE_CHECKING
 
 import redis.asyncio as redis
+from redis import UsernamePasswordCredentialProvider
 
 from infrahub import config
 from infrahub.services.adapters.cache import InfrahubCache
@@ -13,10 +14,16 @@ if TYPE_CHECKING:
 
 class RedisCache(InfrahubCache):
     def __init__(self) -> None:
+        credential_provider: UsernamePasswordCredentialProvider | None = None
+        if config.SETTINGS.cache.username and config.SETTINGS.cache.password:
+            credential_provider = UsernamePasswordCredentialProvider(
+                username=config.SETTINGS.cache.username, password=config.SETTINGS.cache.password
+            )
         self.connection = redis.Redis(
             host=config.SETTINGS.cache.address,
             port=config.SETTINGS.cache.service_port,
             db=config.SETTINGS.cache.database,
+            credential_provider=credential_provider,
             ssl=config.SETTINGS.cache.tls_enabled,
             ssl_cert_reqs="optional" if not config.SETTINGS.cache.tls_insecure else "none",
             ssl_check_hostname=not config.SETTINGS.cache.tls_insecure,
infrahub/services/adapters/http/httpx.py CHANGED
@@ -36,11 +36,38 @@ class HttpxAdapter(InfrahubHTTP):
 
     @cached_property
     def tls_context(self) -> ssl.SSLContext:
+        """TLS context based on global HTTPSettings.
+
+        May be an unverified context if tls_insecure=True in settings.
+        """
         return self.settings.get_tls_context()
 
+    @cached_property
+    def tls_context_verified(self) -> ssl.SSLContext:
+        """TLS context that always performs certificate validation.
+
+        Uses tls_ca_bundle from settings if configured, but ignores tls_insecure.
+        This allows callers to explicitly request certificate validation even when
+        the global setting disables it.
+        """
+        return self.settings.get_tls_context(force_verify=True)
+
     def verify_tls(self, verify: bool | None = None) -> bool | ssl.SSLContext:
+        """Determine the TLS verification behavior for a request.
+
+        Args:
+            verify: Override for TLS verification behavior.
+                - None: Use global settings (may skip verification if tls_insecure=True)
+                - False: Explicitly disable certificate validation
+                - True: Force certificate validation, ignoring global tls_insecure setting
+
+        Returns:
+            False to disable verification, or an SSLContext for verification.
+        """
         if verify is False:
             return False
+        if verify is True:
+            return self.tls_context_verified
 
         return self.tls_context
 
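The docstrings above define a tri-state contract for verify. A standalone sketch of the same dispatch using only the standard library (illustrative; the real adapter builds its contexts from HTTPSettings):

    import ssl

    def resolve_verify(verify: bool | None, insecure_default: bool) -> bool | ssl.SSLContext:
        if verify is False:
            return False  # explicitly disable certificate validation
        if verify is True:
            return ssl.create_default_context()  # force validation
        # verify is None: fall back to the globally configured behavior
        context = ssl.create_default_context()
        if insecure_default:
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
        return context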
infrahub/trigger/catalogue.py CHANGED
@@ -9,6 +9,7 @@ from infrahub.hfid.triggers import TRIGGER_HFID_ALL_SCHEMA
 from infrahub.profiles.triggers import TRIGGER_PROFILE_REFRESH_SETUP
 from infrahub.schema.triggers import TRIGGER_SCHEMA_UPDATED
 from infrahub.trigger.models import TriggerDefinition
+from infrahub.trigger.system import TRIGGER_CRASH_ZOMBIE_FLOWS
 from infrahub.webhook.triggers import TRIGGER_WEBHOOK_DELETE, TRIGGER_WEBHOOK_SETUP_UPDATE
 
 builtin_triggers: list[TriggerDefinition] = [
@@ -16,6 +17,7 @@ builtin_triggers: list[TriggerDefinition] = [
     TRIGGER_BRANCH_MERGED,
     TRIGGER_COMPUTED_ATTRIBUTE_ALL_SCHEMA,
     TRIGGER_COMPUTED_ATTRIBUTE_PYTHON_SETUP_COMMIT,
+    TRIGGER_CRASH_ZOMBIE_FLOWS,
     TRIGGER_DISPLAY_LABELS_ALL_SCHEMA,
     TRIGGER_HFID_ALL_SCHEMA,
     TRIGGER_PROFILE_REFRESH_SETUP,
infrahub/trigger/models.py CHANGED
@@ -4,7 +4,8 @@ from datetime import timedelta
 from enum import StrEnum
 from typing import TYPE_CHECKING, Any, TypeVar
 
-from prefect.events.actions import RunDeployment
+from prefect.client.schemas.objects import StateType  # noqa: TC002
+from prefect.events.actions import ChangeFlowRunState, RunDeployment
 from prefect.events.schemas.automations import Automation, Posture
 from prefect.events.schemas.automations import EventTrigger as PrefectEventTrigger
 from prefect.events.schemas.events import ResourceSpecification
@@ -146,6 +147,55 @@ class EventTrigger(BaseModel):
         return [ResourceSpecification(related_match) for related_match in self.match_related]
 
 
+class ProactiveEventTrigger(EventTrigger):
+    """A proactive event trigger that fires when expected events do NOT occur within a time window.
+
+    Unlike EventTrigger which uses Reactive posture (fires when events occur),
+    ProactiveEventTrigger uses Proactive posture to detect missing events.
+    """
+
+    after: set[str] = Field(default_factory=set)
+    for_each: set[str] = Field(default_factory=set)
+    threshold: int = 1
+    within: timedelta = Field(default_factory=lambda: timedelta(seconds=90))
+
+    def get_prefect(self) -> PrefectEventTrigger:
+        return PrefectEventTrigger(
+            posture=Posture.Proactive,
+            after=self.after,
+            expect=self.events,
+            match=ResourceSpecification(self.match),
+            for_each=self.for_each,
+            threshold=self.threshold,
+            within=self.within,
+        )
+
+
+class ChangeFlowRunStateAction(BaseModel):
+    """Action to change the state of a flow run.
+
+    Used for system automations that need to modify flow run states,
+    such as crashing zombie flows that have stopped sending heartbeats.
+    """
+
+    state: StateType
+    message: str = ""
+
+    def get_prefect(self, _mapping: dict[str, UUID] | None = None) -> ChangeFlowRunState:
+        """Get the Prefect ChangeFlowRunState action.
+
+        Args:
+            _mapping: Not used for this action type, but included for interface compatibility.
+
+        Returns:
+            A Prefect ChangeFlowRunState action.
+        """
+        return ChangeFlowRunState(  # type: ignore[call-arg]
+            state=self.state,
+            message=self.message,
+        )
+
+
 class ExecuteWorkflow(BaseModel):
     workflow: WorkflowDefinition
     parameters: dict[str, Any] = Field(default_factory=dict)
@@ -186,17 +236,21 @@
             raise ValueError(f"Workflow {self.workflow.name} doesn't support parameters: {wrong_params}")
 
 
+# Type alias for all trigger action types
+TriggerActionType = ExecuteWorkflow | ChangeFlowRunStateAction
+
+
 class TriggerDefinition(BaseModel):
     name: str
     type: TriggerType
     previous_names: set = Field(default_factory=set)
     description: str = ""
     trigger: EventTrigger
-    actions: list[ExecuteWorkflow]
+    actions: list[TriggerActionType]
 
     def get_deployment_names(self) -> list[str]:
         """Return the name of all deployments used by this trigger"""
-        return [action.name for action in self.actions]
+        return [action.name for action in self.actions if isinstance(action, ExecuteWorkflow)]
 
     def get_description(self) -> str:
         return f"Automation for Trigger {self.name} of type {self.type.value} (v{__version__})"
@@ -206,7 +260,8 @@
 
     def validate_actions(self) -> None:
         for action in self.actions:
-            action.validate_parameters()
+            if isinstance(action, ExecuteWorkflow):
+                action.validate_parameters()
 
 
 class TriggerBranchDefinition(TriggerDefinition):
@@ -218,3 +273,17 @@
 
 class BuiltinTriggerDefinition(TriggerDefinition):
     type: TriggerType = TriggerType.BUILTIN
+
+
+class SystemTriggerDefinition(BuiltinTriggerDefinition):
+    """A trigger definition for system-level Prefect automations.
+
+    Unlike other TriggerDefinitions which execute Infrahub workflows, SystemTriggerDefinition
+    is designed for Prefect system automations that don't require workflow deployments,
+    such as crashing zombie flows.
+
+    Uses ChangeFlowRunStateAction for actions (not ExecuteWorkflow).
+    """
+
+    def get_description(self) -> str:
+        return f"System Automation for {self.name} (v{__version__})"
infrahub/trigger/setup.py CHANGED
@@ -119,7 +119,7 @@ async def setup_triggers(
         description=trigger.get_description(),
         enabled=True,
         trigger=trigger.trigger.get_prefect(),
-        actions=[action.get_prefect(mapping=deployments_mapping) for action in trigger.actions],
+        actions=[action.get_prefect(deployments_mapping) for action in trigger.actions],
     )
 
     existing_automation = existing_automations.get(trigger.generate_name())
infrahub/trigger/system.py ADDED
@@ -0,0 +1,36 @@
+"""System-level Prefect automations.
+
+This module contains system automations that manage Prefect infrastructure,
+such as detecting and crashing zombie flow runs.
+"""
+
+from datetime import timedelta
+
+from prefect.client.schemas.objects import StateType
+
+from infrahub.trigger.models import ChangeFlowRunStateAction, ProactiveEventTrigger, SystemTriggerDefinition
+
+TRIGGER_CRASH_ZOMBIE_FLOWS = SystemTriggerDefinition(
+    name="crash-zombie-flows",
+    description="Crashes flow runs that have stopped sending heartbeats",
+    trigger=ProactiveEventTrigger(
+        after={"prefect.flow-run.heartbeat"},
+        events={
+            "prefect.flow-run.heartbeat",
+            "prefect.flow-run.Completed",
+            "prefect.flow-run.Failed",
+            "prefect.flow-run.Cancelled",
+            "prefect.flow-run.Crashed",
+        },
+        match={"prefect.resource.id": ["prefect.flow-run.*"]},
+        for_each={"prefect.resource.id"},
+        threshold=1,
+        within=timedelta(seconds=90),
+    ),
+    actions=[
+        ChangeFlowRunStateAction(
+            state=StateType.CRASHED,
+            message="Flow run marked as crashed due to missing heartbeats.",
+        )
+    ],
+)
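
Because the trigger uses Proactive posture, Prefect arms the automation after each heartbeat and fires only when none of the expected events arrive within the 90-second window for a given flow-run resource. The compiled trigger can be inspected directly, using only the models defined in this diff:

    from infrahub.trigger.system import TRIGGER_CRASH_ZOMBIE_FLOWS

    prefect_trigger = TRIGGER_CRASH_ZOMBIE_FLOWS.trigger.get_prefect()
    print(prefect_trigger.posture)  # Posture.Proactive
    print(prefect_trigger.within)   # 0:01:30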
infrahub/webhook/models.py CHANGED
@@ -118,7 +118,7 @@ class Webhook(BaseModel):
     name: str = Field(...)
     url: str = Field(...)
     event_type: str = Field(...)
-    validate_certificates: bool = Field(...)
+    validate_certificates: bool | None = Field(...)
     _payload: Any = None
     _headers: dict[str, Any] | None = None
     shared_key: str | None = Field(default=None, description="Shared key for signing the webhook requests")
@@ -162,7 +162,9 @@
         self, data: dict[str, Any], context: EventContext, http_service: InfrahubHTTP, client: InfrahubClient
     ) -> Response:
         await self.prepare(data=data, context=context, client=client)
-        return await http_service.post(url=self.url, json=self.get_payload(), headers=self._headers)
+        return await http_service.post(
+            url=self.url, json=self.get_payload(), headers=self._headers, verify=self.validate_certificates
+        )
 
     def get_payload(self) -> dict[str, Any]:
         return self._payload
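
With validate_certificates now Optional (apparently paired with the m055_remove_webhook_validate_certificates_default migration in the file list), the per-webhook setting flows straight into HttpxAdapter.verify_tls(): None defers to the global TLS settings, True forces validation even when tls_insecure is set, and False skips it for this webhook only. A hypothetical call site using the names from the models above:

    async def fire(webhook: Webhook, http_service: InfrahubHTTP, payload: dict) -> None:
        # None -> global settings; True -> force validation; False -> skip validation
        await http_service.post(url=webhook.url, json=payload, verify=webhook.validate_certificates)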
infrahub/webhook/tasks.py CHANGED
@@ -12,7 +12,7 @@ from prefect.client.orchestration import get_client as get_prefect_client
 from prefect.logging import get_run_logger
 
 from infrahub.message_bus.types import KVTTL
-from infrahub.trigger.models import TriggerType
+from infrahub.trigger.models import ExecuteWorkflow, TriggerType
 from infrahub.trigger.setup import gather_all_automations, setup_triggers_specific
 from infrahub.workers.dependencies import get_cache, get_client, get_database, get_http
 from infrahub.workflows.utils import add_tags
@@ -159,7 +159,7 @@ async def configure_webhook_one(
         description=trigger.get_description(),
         enabled=True,
         trigger=trigger.trigger.get_prefect(),
-        actions=[action.get(deployment.id) for action in trigger.actions],
+        actions=[action.get(deployment.id) for action in trigger.actions if isinstance(action, ExecuteWorkflow)],
     )
 
     if existing_automation:
infrahub/workflows/initialization.py CHANGED
@@ -50,7 +50,7 @@ async def setup_blocks() -> None:
     log = get_run_logger()
 
     try:
-        await RedisStorageContainer.register_type_and_schema()
+        await RedisStorageContainer.aregister_type_and_schema()
     except ObjectAlreadyExists:
         log.warning(f"Redis Storage {TASK_RESULT_STORAGE_NAME} already registered ")
 
@@ -62,7 +62,7 @@ async def setup_blocks() -> None:
         password=config.SETTINGS.cache.password or None,
     )
     try:
-        await redis_block.save(name=TASK_RESULT_STORAGE_NAME, overwrite=True)
+        await redis_block.asave(name=TASK_RESULT_STORAGE_NAME, overwrite=True)
     except ObjectAlreadyExists:
         log.warning(f"Redis Storage {TASK_RESULT_STORAGE_NAME} already present ")
 
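The register_type_and_schema → aregister_type_and_schema and save → asave renames switch to Prefect 3's async-native block methods instead of awaiting the sync variants. The same pattern with a built-in block (a sketch, assuming Prefect 3.x):

    from prefect.blocks.system import Secret

    async def save_block() -> None:
        await Secret.aregister_type_and_schema()
        await Secret(value="example").asave(name="demo-secret", overwrite=True)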
infrahub_sdk/analyzer.py CHANGED
@@ -30,10 +30,10 @@ class GraphQLOperation(BaseModel):
 
 
 class GraphQLQueryAnalyzer:
-    def __init__(self, query: str, schema: GraphQLSchema | None = None) -> None:
+    def __init__(self, query: str, schema: GraphQLSchema | None = None, document: DocumentNode | None = None) -> None:
         self.query: str = query
         self.schema: GraphQLSchema | None = schema
-        self.document: DocumentNode = parse(self.query)
+        self.document: DocumentNode = document or parse(self.query)
         self._fields: dict | None = None
 
     @property
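
Accepting a pre-parsed DocumentNode lets callers such as validate_artifacts_generation (see the cached_parse change above) parse a query once and share the document across analyzers. A usage sketch with graphql-core:

    from graphql import parse

    from infrahub_sdk.analyzer import GraphQLQueryAnalyzer

    query = "query { BuiltinTag { edges { node { id } } } }"
    document = parse(query)  # parse once, possibly via a cache

    analyzer = GraphQLQueryAnalyzer(query=query, schema=None, document=document)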
infrahub_sdk/branch.py CHANGED
@@ -1,6 +1,5 @@
 from __future__ import annotations
 
-import warnings
 from enum import Enum
 from typing import TYPE_CHECKING, Any, Literal, overload
 from urllib.parse import urlencode
@@ -93,7 +92,6 @@ class InfrahubBranchManager(InfraHubBranchManagerBase):
         sync_with_git: bool = True,
         description: str = "",
         wait_until_completion: Literal[True] = True,
-        background_execution: bool | None = False,
     ) -> BranchData: ...
 
     @overload
@@ -103,7 +101,6 @@
         sync_with_git: bool = True,
         description: str = "",
         wait_until_completion: Literal[False] = False,
-        background_execution: bool | None = False,
     ) -> str: ...
 
     async def create(
@@ -112,19 +109,9 @@
         sync_with_git: bool = True,
         description: str = "",
         wait_until_completion: bool = True,
-        background_execution: bool | None = False,
     ) -> BranchData | str:
-        if background_execution is not None:
-            warnings.warn(
-                "`background_execution` is deprecated, please use `wait_until_completion` instead.",
-                DeprecationWarning,
-                stacklevel=1,
-            )
-
-        background_execution = background_execution or not wait_until_completion
         input_data = {
-            # Should be switched to `wait_until_completion` once `background_execution` is removed server side.
-            "background_execution": background_execution,
+            "wait_until_completion": wait_until_completion,
             "data": {
                 "name": branch_name,
                 "description": description,
@@ -132,15 +119,14 @@
             },
         }
 
-        mutation_query = MUTATION_QUERY_TASK if background_execution else MUTATION_QUERY_DATA
+        mutation_query = MUTATION_QUERY_DATA if wait_until_completion else MUTATION_QUERY_TASK
         query = Mutation(mutation="BranchCreate", input_data=input_data, query=mutation_query)
         response = await self.client.execute_graphql(query=query.render(), tracker="mutation-branch-create")
 
-        # Make sure server version is recent enough to support background execution, as previously
-        # using background_execution=True had no effect.
-        if background_execution and "task" in response["BranchCreate"]:
-            return response["BranchCreate"]["task"]["id"]
-        return BranchData(**response["BranchCreate"]["object"])
+        if wait_until_completion:
+            return BranchData(**response["BranchCreate"]["object"])
+
+        return response["BranchCreate"]["task"]["id"]
 
     async def delete(self, branch_name: str) -> bool:
         input_data = {
@@ -261,7 +247,6 @@ class InfrahubBranchManagerSync(InfraHubBranchManagerBase):
         sync_with_git: bool = True,
         description: str = "",
         wait_until_completion: Literal[True] = True,
-        background_execution: bool | None = False,
     ) -> BranchData: ...
 
     @overload
@@ -271,7 +256,6 @@
         sync_with_git: bool = True,
         description: str = "",
         wait_until_completion: Literal[False] = False,
-        background_execution: bool | None = False,
     ) -> str: ...
 
     def create(
@@ -280,19 +264,9 @@
         sync_with_git: bool = True,
         description: str = "",
         wait_until_completion: bool = True,
-        background_execution: bool | None = False,
     ) -> BranchData | str:
-        if background_execution is not None:
-            warnings.warn(
-                "`background_execution` is deprecated, please use `wait_until_completion` instead.",
-                DeprecationWarning,
-                stacklevel=1,
-            )
-
-        background_execution = background_execution or not wait_until_completion
         input_data = {
-            # Should be switched to `wait_until_completion` once `background_execution` is removed server side.
-            "background_execution": background_execution,
+            "wait_until_completion": wait_until_completion,
             "data": {
                 "name": branch_name,
                 "description": description,
@@ -300,15 +274,14 @@
             },
         }
 
-        mutation_query = MUTATION_QUERY_TASK if background_execution else MUTATION_QUERY_DATA
+        mutation_query = MUTATION_QUERY_DATA if wait_until_completion else MUTATION_QUERY_TASK
         query = Mutation(mutation="BranchCreate", input_data=input_data, query=mutation_query)
         response = self.client.execute_graphql(query=query.render(), tracker="mutation-branch-create")
 
-        # Make sure server version is recent enough to support background execution, as previously
-        # using background_execution=True had no effect.
-        if background_execution and "task" in response["BranchCreate"]:
-            return response["BranchCreate"]["task"]["id"]
-        return BranchData(**response["BranchCreate"]["object"])
+        if wait_until_completion:
+            return BranchData(**response["BranchCreate"]["object"])
+
+        return response["BranchCreate"]["task"]["id"]
 
     def delete(self, branch_name: str) -> bool:
         input_data = {
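
With background_execution removed, the return type is driven solely by wait_until_completion: True returns the created BranchData, False returns the id of the server-side task. A usage sketch (server address is illustrative):

    from infrahub_sdk import InfrahubClient

    async def demo() -> None:
        client = InfrahubClient(address="http://localhost:8000")
        # Default: block until the branch exists, get BranchData back
        branch = await client.branch.create(branch_name="feature-1")
        # Fire-and-forget: get the task id back instead
        task_id = await client.branch.create(branch_name="feature-2", wait_until_completion=False)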