infrahub-server 1.7.1__py3-none-any.whl → 1.7.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- infrahub/actions/gather.py +2 -2
- infrahub/api/query.py +3 -2
- infrahub/api/transformation.py +3 -3
- infrahub/computed_attribute/gather.py +2 -0
- infrahub/config.py +2 -2
- infrahub/core/attribute.py +21 -2
- infrahub/core/diff/model/path.py +43 -0
- infrahub/core/graph/__init__.py +1 -1
- infrahub/core/graph/index.py +2 -0
- infrahub/core/ipam/resource_allocator.py +229 -0
- infrahub/core/migrations/graph/__init__.py +8 -0
- infrahub/core/migrations/graph/m052_fix_global_branch_level.py +51 -0
- infrahub/core/migrations/graph/m053_fix_branch_level_zero.py +61 -0
- infrahub/core/migrations/graph/m054_cleanup_orphaned_nodes.py +87 -0
- infrahub/core/migrations/graph/m055_remove_webhook_validate_certificates_default.py +86 -0
- infrahub/core/migrations/schema/node_attribute_add.py +17 -19
- infrahub/core/node/lock_utils.py +23 -2
- infrahub/core/node/resource_manager/ip_address_pool.py +5 -11
- infrahub/core/node/resource_manager/ip_prefix_pool.py +5 -21
- infrahub/core/node/resource_manager/number_pool.py +109 -39
- infrahub/core/query/__init__.py +7 -1
- infrahub/core/query/branch.py +18 -2
- infrahub/core/query/ipam.py +629 -40
- infrahub/core/query/node.py +128 -0
- infrahub/core/query/resource_manager.py +114 -1
- infrahub/core/relationship/model.py +1 -1
- infrahub/core/schema/definitions/core/webhook.py +0 -1
- infrahub/core/schema/definitions/internal.py +7 -4
- infrahub/core/validators/determiner.py +4 -0
- infrahub/graphql/analyzer.py +3 -1
- infrahub/graphql/app.py +7 -10
- infrahub/graphql/execution.py +95 -0
- infrahub/graphql/mutations/proposed_change.py +15 -0
- infrahub/graphql/parser.py +10 -7
- infrahub/graphql/queries/ipam.py +20 -25
- infrahub/graphql/queries/search.py +29 -9
- infrahub/proposed_change/tasks.py +2 -0
- infrahub/services/adapters/http/httpx.py +27 -0
- infrahub/trigger/catalogue.py +2 -0
- infrahub/trigger/models.py +73 -4
- infrahub/trigger/setup.py +1 -1
- infrahub/trigger/system.py +36 -0
- infrahub/webhook/models.py +4 -2
- infrahub/webhook/tasks.py +2 -2
- infrahub/workflows/initialization.py +2 -2
- {infrahub_server-1.7.1.dist-info → infrahub_server-1.7.2.dist-info}/METADATA +3 -3
- {infrahub_server-1.7.1.dist-info → infrahub_server-1.7.2.dist-info}/RECORD +52 -46
- infrahub_testcontainers/docker-compose-cluster.test.yml +16 -10
- infrahub_testcontainers/docker-compose.test.yml +11 -10
- infrahub/pools/address.py +0 -16
- {infrahub_server-1.7.1.dist-info → infrahub_server-1.7.2.dist-info}/WHEEL +0 -0
- {infrahub_server-1.7.1.dist-info → infrahub_server-1.7.2.dist-info}/entry_points.txt +0 -0
- {infrahub_server-1.7.1.dist-info → infrahub_server-1.7.2.dist-info}/licenses/LICENSE.txt +0 -0
infrahub/graphql/queries/ipam.py
CHANGED
@@ -7,11 +7,10 @@ from graphene import Field, Int, ObjectType, String
 from netaddr import IPSet

 from infrahub.core.constants import InfrahubKind
+from infrahub.core.ipam.resource_allocator import IPAMResourceAllocator
 from infrahub.core.manager import NodeManager
 from infrahub.core.protocols import BuiltinIPPrefix
-from infrahub.core.query.ipam import get_ip_addresses, get_subnets
 from infrahub.exceptions import NodeNotFoundError, ValidationError
-from infrahub.pools.address import get_available
 from infrahub.pools.prefix import get_next_available_prefix

 if TYPE_CHECKING:
@@ -48,24 +47,15 @@ class IPAddressGetNextAvailable(ObjectType):
             raise ValidationError(input_value="Invalid prefix length for current selected prefix")

         namespace = await prefix.ip_namespace.get_peer(db=graphql_context.db)  # type: ignore[attr-defined]
-        addresses = await get_ip_addresses(
-            db=graphql_context.db,
+        allocator = IPAMResourceAllocator(db=graphql_context.db, namespace=namespace, branch=graphql_context.branch)
+        next_address = await allocator.get_next_address(
             ip_prefix=ip_prefix,
-            namespace=namespace,
-            branch=graphql_context.branch,
-        )
-
-        available = get_available(
-            network=ip_prefix,
-            addresses=[ip.address for ip in addresses],
             is_pool=prefix.is_pool.value,  # type: ignore[attr-defined]
         )

-        if not available:
+        if not next_address:
             raise IndexError("No addresses available in prefix")

-        next_address = available.iter_cidrs()[0]
-
         return {"address": f"{next_address.ip}/{prefix_length}"}


@@ -90,22 +80,27 @@ class IPPrefixGetNextAvailable(ObjectType):
             branch_name=graphql_context.branch.name, node_type=InfrahubKind.IPPREFIX, identifier=prefix_id
         )

+        ip_prefix = prefix.prefix.obj
         namespace = await prefix.ip_namespace.get_peer(db=graphql_context.db)
-        subnets = await get_subnets(
-            db=graphql_context.db,
-            ip_prefix=ipaddress.ip_network(prefix.prefix.value),
-            namespace=namespace,
-            branch=graphql_context.branch,
-        )
+        allocator = IPAMResourceAllocator(db=graphql_context.db, namespace=namespace, branch=graphql_context.branch)

-        pool
+        # Build available pool by removing existing subnets from parent prefix
+        subnets = await allocator.get_subnets(ip_prefix=ip_prefix)
+        available_pool = IPSet([str(ip_prefix)])
         for subnet in subnets:
-            pool.remove(str(subnet.prefix))
+            available_pool.remove(str(subnet.prefix))

-
-
+        if prefix_length is not None and not ip_prefix.prefixlen < prefix_length <= ip_prefix.max_prefixlen:
+            raise ValidationError(input_value="Invalid prefix length for current selected prefix")
+
+        try:
+            next_prefix = get_next_available_prefix(
+                pool=available_pool, prefix_length=prefix_length, prefix_ver=ip_prefix.version
+            )
+        except ValueError as exc:
+            raise IndexError("No prefixes available in prefix") from exc

-        return {"prefix": str(
+        return {"prefix": str(next_prefix)}


 InfrahubIPAddressGetNextAvailable = Field(

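Note: both resolvers above ultimately rely on netaddr's IPSet for the free-space math: subtract every allocated child from the parent prefix, then take the first remaining CIDR. A minimal standalone sketch of that calculation (illustrative values, not Infrahub API):

from netaddr import IPSet

parent = "10.0.0.0/16"
allocated = ["10.0.0.0/24", "10.0.1.0/24"]

available = IPSet([parent])
for subnet in allocated:
    available.remove(subnet)  # carve each existing child out of the parent

# The remaining space collapses to CIDR blocks, lowest address first
print(available.iter_cidrs()[0])  # 10.0.2.0/23
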
infrahub/graphql/queries/search.py
CHANGED

@@ -9,12 +9,12 @@ from infrahub_sdk.utils import is_valid_uuid

 from infrahub.core.constants import InfrahubKind
 from infrahub.core.manager import NodeManager
+from infrahub.core.query.node import NodeGetListByAttributeValueQuery
 from infrahub.graphql.field_extractor import extract_graphql_fields

 if TYPE_CHECKING:
     from graphql import GraphQLResolveInfo

-    from infrahub.core.node import Node as InfrahubNode
     from infrahub.graphql.initialization import GraphqlContext


@@ -104,10 +104,11 @@ async def search_resolver(
     q: str,
     limit: int = 10,
     partial_match: bool = True,
+    case_sensitive: bool = False,
 ) -> dict[str, Any]:
     graphql_context: GraphqlContext = info.context
     response: dict[str, Any] = {}
-    results: list[InfrahubNode] = []
+    results: list[dict[str, str]] = []

     fields = extract_graphql_fields(info=info)

@@ -116,25 +117,43 @@ async def search_resolver(
         db=graphql_context.db, branch=graphql_context.branch, at=graphql_context.at, id=q
     )
     if matching:
-        results.append(matching)
+        results.append({"id": matching.id, "kind": matching.get_kind()})
     else:
         with contextlib.suppress(ValueError, ipaddress.AddressValueError):
             # Convert any IPv6 address, network or partial address to collapsed format as it might be stored in db.
             q = _collapse_ipv6(q)

-        for kind in [InfrahubKind.NODE, InfrahubKind.GENERICGROUP]:
-            objs = await NodeManager.query(
+        if case_sensitive:
+            # Case-sensitive search using the dedicated query
+            query = await NodeGetListByAttributeValueQuery.init(
                 db=graphql_context.db,
                 branch=graphql_context.branch,
-                schema=kind,
-                filters={"any__value": q},
+                at=graphql_context.at,
+                search_value=q,
+                kinds=[InfrahubKind.NODE, InfrahubKind.GENERICGROUP],
                 limit=limit,
                 partial_match=partial_match,
             )
-            results.extend(objs)
+            await query.execute(db=graphql_context.db)
+
+            for result in query.get_data():
+                results.append({"id": result.uuid, "kind": result.kind})
+        else:
+            # Default: case-insensitive search using NodeManager.query
+            for kind in [InfrahubKind.NODE, InfrahubKind.GENERICGROUP]:
+                objs = await NodeManager.query(
+                    db=graphql_context.db,
+                    branch=graphql_context.branch,
+                    schema=kind,
+                    filters={"any__value": q},
+                    limit=limit,
+                    partial_match=partial_match,
+                )
+                for obj in objs:
+                    results.append({"id": obj.id, "kind": obj.get_kind()})

     if "edges" in fields:
-        response["edges"] = [{"node":
+        response["edges"] = [{"node": result} for result in results]

     if "count" in fields:
         response["count"] = len(results)
@@ -147,6 +166,7 @@ InfrahubSearchAnywhere = Field(
     q=String(required=True),
     limit=Int(required=False),
     partial_match=Boolean(required=False),
+    case_sensitive=Boolean(required=False),
     resolver=search_resolver,
     required=True,
 )

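Note: the new case_sensitive argument is exposed on the InfrahubSearchAnywhere GraphQL field defined above. A hedged usage sketch via the Python SDK (assuming the SDK's execute_graphql helper; the field and node shape follow the resolver, but are not verified here):

from infrahub_sdk import InfrahubClient

SEARCH_QUERY = """
query Search($q: String!) {
  InfrahubSearchAnywhere(q: $q, limit: 10, case_sensitive: true) {
    count
    edges { node { id kind } }
  }
}
"""

async def search_exact(client: InfrahubClient, term: str) -> list[dict[str, str]]:
    # case_sensitive=true routes the resolver through NodeGetListByAttributeValueQuery
    result = await client.execute_graphql(query=SEARCH_QUERY, variables={"q": term})
    return [edge["node"] for edge in result["InfrahubSearchAnywhere"]["edges"]]
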
infrahub/proposed_change/tasks.py
CHANGED

@@ -61,6 +61,7 @@ from infrahub.git.models import TriggerRepositoryInternalChecks, TriggerReposito
 from infrahub.git.repository import InfrahubRepository, get_initialized_repo
 from infrahub.git.utils import fetch_artifact_definition_targets, fetch_proposed_change_generator_definition_targets
 from infrahub.graphql.analyzer import InfrahubGraphQLQueryAnalyzer
+from infrahub.graphql.execution import cached_parse
 from infrahub.graphql.initialization import prepare_graphql_params
 from infrahub.log import get_logger
 from infrahub.message_bus.types import (
@@ -685,6 +686,7 @@ async def validate_artifacts_generation(model: RequestArtifactDefinitionCheck, c
         branch=source_branch,
         schema_branch=source_schema_branch,
         schema=graphql_params.schema,
+        document=cached_parse(model.artifact_definition.query_payload),
     )

 only_has_unique_targets = query_analyzer.query_report.only_has_unique_targets

infrahub/services/adapters/http/httpx.py
CHANGED

@@ -36,11 +36,38 @@ class HttpxAdapter(InfrahubHTTP):

     @cached_property
     def tls_context(self) -> ssl.SSLContext:
+        """TLS context based on global HTTPSettings.
+
+        May be an unverified context if tls_insecure=True in settings.
+        """
         return self.settings.get_tls_context()

+    @cached_property
+    def tls_context_verified(self) -> ssl.SSLContext:
+        """TLS context that always performs certificate validation.
+
+        Uses tls_ca_bundle from settings if configured, but ignores tls_insecure.
+        This allows callers to explicitly request certificate validation even when
+        the global setting disables it.
+        """
+        return self.settings.get_tls_context(force_verify=True)
+
     def verify_tls(self, verify: bool | None = None) -> bool | ssl.SSLContext:
+        """Determine the TLS verification behavior for a request.
+
+        Args:
+            verify: Override for TLS verification behavior.
+                - None: Use global settings (may skip verification if tls_insecure=True)
+                - False: Explicitly disable certificate validation
+                - True: Force certificate validation, ignoring global tls_insecure setting
+
+        Returns:
+            False to disable verification, or an SSLContext for verification.
+        """
         if verify is False:
             return False
+        if verify is True:
+            return self.tls_context_verified

         return self.tls_context

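Note: the three-way resolution implemented by verify_tls() can be summarized without Infrahub's settings plumbing. A self-contained sketch, with plain ssl contexts standing in for HTTPSettings.get_tls_context():

import ssl

def resolve_verify(verify: bool | None, tls_insecure: bool) -> bool | ssl.SSLContext:
    if verify is False:
        return False  # caller explicitly disables certificate validation
    context = ssl.create_default_context()
    if verify is True:
        return context  # forced validation: the global tls_insecure flag is ignored
    if tls_insecure:  # verify is None: defer to global settings
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    return context
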
infrahub/trigger/catalogue.py
CHANGED
@@ -9,6 +9,7 @@ from infrahub.hfid.triggers import TRIGGER_HFID_ALL_SCHEMA
 from infrahub.profiles.triggers import TRIGGER_PROFILE_REFRESH_SETUP
 from infrahub.schema.triggers import TRIGGER_SCHEMA_UPDATED
 from infrahub.trigger.models import TriggerDefinition
+from infrahub.trigger.system import TRIGGER_CRASH_ZOMBIE_FLOWS
 from infrahub.webhook.triggers import TRIGGER_WEBHOOK_DELETE, TRIGGER_WEBHOOK_SETUP_UPDATE

 builtin_triggers: list[TriggerDefinition] = [
@@ -16,6 +17,7 @@ builtin_triggers: list[TriggerDefinition] = [
     TRIGGER_BRANCH_MERGED,
     TRIGGER_COMPUTED_ATTRIBUTE_ALL_SCHEMA,
     TRIGGER_COMPUTED_ATTRIBUTE_PYTHON_SETUP_COMMIT,
+    TRIGGER_CRASH_ZOMBIE_FLOWS,
     TRIGGER_DISPLAY_LABELS_ALL_SCHEMA,
     TRIGGER_HFID_ALL_SCHEMA,
     TRIGGER_PROFILE_REFRESH_SETUP,

infrahub/trigger/models.py
CHANGED
@@ -4,7 +4,8 @@ from datetime import timedelta
 from enum import StrEnum
 from typing import TYPE_CHECKING, Any, TypeVar

-from prefect.events.actions import RunDeployment
+from prefect.client.schemas.objects import StateType  # noqa: TC002
+from prefect.events.actions import ChangeFlowRunState, RunDeployment
 from prefect.events.schemas.automations import Automation, Posture
 from prefect.events.schemas.automations import EventTrigger as PrefectEventTrigger
 from prefect.events.schemas.events import ResourceSpecification
@@ -146,6 +147,55 @@ class EventTrigger(BaseModel):
         return [ResourceSpecification(related_match) for related_match in self.match_related]


+class ProactiveEventTrigger(EventTrigger):
+    """A proactive event trigger that fires when expected events do NOT occur within a time window.
+
+    Unlike EventTrigger which uses Reactive posture (fires when events occur),
+    ProactiveEventTrigger uses Proactive posture to detect missing events.
+    """
+
+    after: set[str] = Field(default_factory=set)
+    for_each: set[str] = Field(default_factory=set)
+    threshold: int = 1
+    within: timedelta = Field(default_factory=lambda: timedelta(seconds=90))
+
+    def get_prefect(self) -> PrefectEventTrigger:
+        return PrefectEventTrigger(
+            posture=Posture.Proactive,
+            after=self.after,
+            expect=self.events,
+            match=ResourceSpecification(self.match),
+            for_each=self.for_each,
+            threshold=self.threshold,
+            within=self.within,
+        )
+
+
+class ChangeFlowRunStateAction(BaseModel):
+    """Action to change the state of a flow run.
+
+    Used for system automations that need to modify flow run states,
+    such as crashing zombie flows that have stopped sending heartbeats.
+    """
+
+    state: StateType
+    message: str = ""
+
+    def get_prefect(self, _mapping: dict[str, UUID] | None = None) -> ChangeFlowRunState:
+        """Get the Prefect ChangeFlowRunState action.
+
+        Args:
+            _mapping: Not used for this action type, but included for interface compatibility.
+
+        Returns:
+            A Prefect ChangeFlowRunState action.
+        """
+        return ChangeFlowRunState(  # type: ignore[call-arg]
+            state=self.state,
+            message=self.message,
+        )
+
+
 class ExecuteWorkflow(BaseModel):
     workflow: WorkflowDefinition
     parameters: dict[str, Any] = Field(default_factory=dict)
@@ -186,17 +236,21 @@ class ExecuteWorkflow(BaseModel):
             raise ValueError(f"Workflow {self.workflow.name} doesn't support parameters: {wrong_params}")


+# Type alias for all trigger action types
+TriggerActionType = ExecuteWorkflow | ChangeFlowRunStateAction
+
+
 class TriggerDefinition(BaseModel):
     name: str
     type: TriggerType
     previous_names: set = Field(default_factory=set)
     description: str = ""
     trigger: EventTrigger
-    actions: list[ExecuteWorkflow]
+    actions: list[TriggerActionType]

     def get_deployment_names(self) -> list[str]:
         """Return the name of all deployments used by this trigger"""
-        return [action.name for action in self.actions]
+        return [action.name for action in self.actions if isinstance(action, ExecuteWorkflow)]

     def get_description(self) -> str:
         return f"Automation for Trigger {self.name} of type {self.type.value} (v{__version__})"
@@ -206,7 +260,8 @@ class TriggerDefinition(BaseModel):

     def validate_actions(self) -> None:
         for action in self.actions:
-            action.validate_parameters()
+            if isinstance(action, ExecuteWorkflow):
+                action.validate_parameters()


 class TriggerBranchDefinition(TriggerDefinition):
@@ -218,3 +273,17 @@ class TriggerBranchDefinition(TriggerDefinition):

 class BuiltinTriggerDefinition(TriggerDefinition):
     type: TriggerType = TriggerType.BUILTIN
+
+
+class SystemTriggerDefinition(BuiltinTriggerDefinition):
+    """A trigger definition for system-level Prefect automations.
+
+    Unlike other TriggerDefinitions which execute Infrahub workflows, SystemTriggerDefinition
+    is designed for Prefect system automations that don't require workflow deployments,
+    such as crashing zombie flows.
+
+    Uses ChangeFlowRunStateAction for actions (not ExecuteWorkflow).
+    """
+
+    def get_description(self) -> str:
+        return f"System Automation for {self.name} (v{__version__})"

infrahub/trigger/setup.py
CHANGED
@@ -119,7 +119,7 @@ async def setup_triggers(
             description=trigger.get_description(),
             enabled=True,
             trigger=trigger.trigger.get_prefect(),
-            actions=[action.get_prefect(
+            actions=[action.get_prefect(deployments_mapping) for action in trigger.actions],
         )

         existing_automation = existing_automations.get(trigger.generate_name())

infrahub/trigger/system.py
ADDED

@@ -0,0 +1,36 @@
+"""System-level Prefect automations.
+
+This module contains system automations that manage Prefect infrastructure,
+such as detecting and crashing zombie flow runs.
+"""
+
+from datetime import timedelta
+
+from prefect.client.schemas.objects import StateType
+
+from infrahub.trigger.models import ChangeFlowRunStateAction, ProactiveEventTrigger, SystemTriggerDefinition
+
+TRIGGER_CRASH_ZOMBIE_FLOWS = SystemTriggerDefinition(
+    name="crash-zombie-flows",
+    description="Crashes flow runs that have stopped sending heartbeats",
+    trigger=ProactiveEventTrigger(
+        after={"prefect.flow-run.heartbeat"},
+        events={
+            "prefect.flow-run.heartbeat",
+            "prefect.flow-run.Completed",
+            "prefect.flow-run.Failed",
+            "prefect.flow-run.Cancelled",
+            "prefect.flow-run.Crashed",
+        },
+        match={"prefect.resource.id": ["prefect.flow-run.*"]},
+        for_each={"prefect.resource.id"},
+        threshold=1,
+        within=timedelta(seconds=90),
+    ),
+    actions=[
+        ChangeFlowRunStateAction(
+            state=StateType.CRASHED,
+            message="Flow run marked as crashed due to missing heartbeats.",
+        )
+    ],
+)
infrahub/webhook/models.py
CHANGED
@@ -118,7 +118,7 @@ class Webhook(BaseModel):
     name: str = Field(...)
     url: str = Field(...)
     event_type: str = Field(...)
-    validate_certificates: bool = Field(...)
+    validate_certificates: bool | None = Field(...)
     _payload: Any = None
     _headers: dict[str, Any] | None = None
     shared_key: str | None = Field(default=None, description="Shared key for signing the webhook requests")
@@ -162,7 +162,9 @@ class Webhook(BaseModel):
         self, data: dict[str, Any], context: EventContext, http_service: InfrahubHTTP, client: InfrahubClient
     ) -> Response:
         await self.prepare(data=data, context=context, client=client)
-        return await http_service.post(
+        return await http_service.post(
+            url=self.url, json=self.get_payload(), headers=self._headers, verify=self.validate_certificates
+        )

     def get_payload(self) -> dict[str, Any]:
         return self._payload

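Note: with validate_certificates widened to bool | None (migration m055 in this release removes the old schema default), the setting is effectively tri-state by the time it reaches HttpxAdapter.verify_tls(). A descriptive sketch of the mapping, not Infrahub code:

def certificate_policy(validate_certificates: bool | None) -> str:
    # Webhook.send() passes verify=self.validate_certificates to the HTTP adapter
    if validate_certificates is False:
        return "skip certificate validation for this webhook"
    if validate_certificates is True:
        return "always validate, even when tls_insecure=True globally"
    return "follow the global HTTP TLS settings"  # None: not set on the webhook
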
infrahub/webhook/tasks.py
CHANGED
@@ -12,7 +12,7 @@ from prefect.client.orchestration import get_client as get_prefect_client
 from prefect.logging import get_run_logger

 from infrahub.message_bus.types import KVTTL
-from infrahub.trigger.models import TriggerType
+from infrahub.trigger.models import ExecuteWorkflow, TriggerType
 from infrahub.trigger.setup import gather_all_automations, setup_triggers_specific
 from infrahub.workers.dependencies import get_cache, get_client, get_database, get_http
 from infrahub.workflows.utils import add_tags
@@ -159,7 +159,7 @@ async def configure_webhook_one(
             description=trigger.get_description(),
             enabled=True,
             trigger=trigger.trigger.get_prefect(),
-            actions=[action.get(deployment.id) for action in trigger.actions],
+            actions=[action.get(deployment.id) for action in trigger.actions if isinstance(action, ExecuteWorkflow)],
         )

         if existing_automation:

infrahub/workflows/initialization.py
CHANGED

@@ -50,7 +50,7 @@ async def setup_blocks() -> None:
     log = get_run_logger()

     try:
-        await RedisStorageContainer.register_type_and_schema()
+        await RedisStorageContainer.aregister_type_and_schema()
     except ObjectAlreadyExists:
         log.warning(f"Redis Storage {TASK_RESULT_STORAGE_NAME} already registered ")

@@ -62,7 +62,7 @@ async def setup_blocks() -> None:
         password=config.SETTINGS.cache.password or None,
     )
     try:
-        await redis_block.save(name=TASK_RESULT_STORAGE_NAME, overwrite=True)
+        await redis_block.asave(name=TASK_RESULT_STORAGE_NAME, overwrite=True)
     except ObjectAlreadyExists:
         log.warning(f"Redis Storage {TASK_RESULT_STORAGE_NAME} already present ")

{infrahub_server-1.7.1.dist-info → infrahub_server-1.7.2.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: infrahub-server
-Version: 1.7.1
+Version: 1.7.2
 Summary: Infrahub is taking a new approach to Infrastructure Management by providing a new generation of datastore to organize and control all the data that defines how an infrastructure should run.
 Project-URL: Homepage, https://opsmill.com
 Project-URL: Repository, https://github.com/opsmill/infrahub
@@ -44,13 +44,13 @@ Requires-Dist: opentelemetry-exporter-otlp-proto-http==1.39.0
 Requires-Dist: opentelemetry-instrumentation-aio-pika==0.60b0
 Requires-Dist: opentelemetry-instrumentation-fastapi==0.60b0
 Requires-Dist: prefect-redis==0.2.8
-Requires-Dist: prefect==3.6.
+Requires-Dist: prefect==3.6.13
 Requires-Dist: pyarrow>=14
 Requires-Dist: pydantic-settings<2.9,>=2.8
 Requires-Dist: pydantic<2.13,>=2.12
 Requires-Dist: pyjwt<2.9,>=2.8
 Requires-Dist: pytest<9.1,>=9.0
-Requires-Dist: python-multipart==0.0.
+Requires-Dist: python-multipart==0.0.22
 Requires-Dist: pyyaml<7,>=6
 Requires-Dist: redis[hiredis]==6.0.0
 Requires-Dist: rich<14,>=13