infrahub-server 1.4.10__py3-none-any.whl → 1.5.0b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- infrahub/actions/tasks.py +208 -16
- infrahub/api/artifact.py +3 -0
- infrahub/api/diff/diff.py +1 -1
- infrahub/api/query.py +2 -0
- infrahub/api/schema.py +3 -0
- infrahub/auth.py +5 -5
- infrahub/cli/db.py +26 -2
- infrahub/cli/db_commands/clean_duplicate_schema_fields.py +212 -0
- infrahub/config.py +7 -2
- infrahub/core/attribute.py +25 -22
- infrahub/core/branch/models.py +2 -2
- infrahub/core/branch/needs_rebase_status.py +11 -0
- infrahub/core/branch/tasks.py +4 -3
- infrahub/core/changelog/models.py +4 -12
- infrahub/core/constants/__init__.py +1 -0
- infrahub/core/constants/infrahubkind.py +1 -0
- infrahub/core/convert_object_type/object_conversion.py +201 -0
- infrahub/core/convert_object_type/repository_conversion.py +89 -0
- infrahub/core/convert_object_type/schema_mapping.py +27 -3
- infrahub/core/diff/model/path.py +4 -0
- infrahub/core/diff/payload_builder.py +1 -1
- infrahub/core/diff/query/artifact.py +1 -1
- infrahub/core/graph/__init__.py +1 -1
- infrahub/core/initialization.py +2 -2
- infrahub/core/ipam/utilization.py +1 -1
- infrahub/core/manager.py +9 -84
- infrahub/core/migrations/graph/__init__.py +6 -0
- infrahub/core/migrations/graph/m040_profile_attrs_in_db.py +166 -0
- infrahub/core/migrations/graph/m041_create_hfid_display_label_in_db.py +97 -0
- infrahub/core/migrations/graph/m042_backfill_hfid_display_label_in_db.py +86 -0
- infrahub/core/migrations/schema/node_attribute_add.py +5 -2
- infrahub/core/migrations/shared.py +5 -6
- infrahub/core/node/__init__.py +165 -42
- infrahub/core/node/constraints/attribute_uniqueness.py +3 -1
- infrahub/core/node/create.py +67 -35
- infrahub/core/node/lock_utils.py +98 -0
- infrahub/core/node/node_property_attribute.py +230 -0
- infrahub/core/node/standard.py +1 -1
- infrahub/core/property.py +11 -0
- infrahub/core/protocols.py +8 -1
- infrahub/core/query/attribute.py +27 -15
- infrahub/core/query/node.py +61 -185
- infrahub/core/query/relationship.py +43 -26
- infrahub/core/query/subquery.py +0 -8
- infrahub/core/registry.py +2 -2
- infrahub/core/relationship/constraints/count.py +1 -1
- infrahub/core/relationship/model.py +60 -20
- infrahub/core/schema/attribute_schema.py +0 -2
- infrahub/core/schema/basenode_schema.py +42 -2
- infrahub/core/schema/definitions/core/__init__.py +2 -0
- infrahub/core/schema/definitions/core/generator.py +2 -0
- infrahub/core/schema/definitions/core/group.py +16 -2
- infrahub/core/schema/definitions/core/repository.py +7 -0
- infrahub/core/schema/definitions/internal.py +14 -1
- infrahub/core/schema/generated/base_node_schema.py +6 -1
- infrahub/core/schema/node_schema.py +5 -2
- infrahub/core/schema/relationship_schema.py +0 -1
- infrahub/core/schema/schema_branch.py +137 -2
- infrahub/core/schema/schema_branch_display.py +123 -0
- infrahub/core/schema/schema_branch_hfid.py +114 -0
- infrahub/core/validators/aggregated_checker.py +1 -1
- infrahub/core/validators/determiner.py +12 -1
- infrahub/core/validators/relationship/peer.py +1 -1
- infrahub/core/validators/tasks.py +1 -1
- infrahub/display_labels/__init__.py +0 -0
- infrahub/display_labels/gather.py +48 -0
- infrahub/display_labels/models.py +240 -0
- infrahub/display_labels/tasks.py +186 -0
- infrahub/display_labels/triggers.py +22 -0
- infrahub/events/group_action.py +1 -1
- infrahub/events/node_action.py +1 -1
- infrahub/generators/constants.py +7 -0
- infrahub/generators/models.py +38 -12
- infrahub/generators/tasks.py +34 -16
- infrahub/git/base.py +38 -1
- infrahub/git/integrator.py +22 -14
- infrahub/graphql/analyzer.py +1 -1
- infrahub/graphql/api/dependencies.py +2 -4
- infrahub/graphql/api/endpoints.py +2 -2
- infrahub/graphql/app.py +2 -4
- infrahub/graphql/initialization.py +2 -3
- infrahub/graphql/manager.py +212 -137
- infrahub/graphql/middleware.py +12 -0
- infrahub/graphql/mutations/branch.py +11 -0
- infrahub/graphql/mutations/computed_attribute.py +110 -3
- infrahub/graphql/mutations/convert_object_type.py +34 -13
- infrahub/graphql/mutations/display_label.py +111 -0
- infrahub/graphql/mutations/generator.py +25 -7
- infrahub/graphql/mutations/hfid.py +118 -0
- infrahub/graphql/mutations/ipam.py +21 -8
- infrahub/graphql/mutations/main.py +37 -153
- infrahub/graphql/mutations/profile.py +195 -0
- infrahub/graphql/mutations/proposed_change.py +2 -1
- infrahub/graphql/mutations/relationship.py +2 -2
- infrahub/graphql/mutations/repository.py +22 -83
- infrahub/graphql/mutations/resource_manager.py +2 -2
- infrahub/graphql/mutations/schema.py +5 -5
- infrahub/graphql/mutations/webhook.py +1 -1
- infrahub/graphql/queries/resource_manager.py +1 -1
- infrahub/graphql/registry.py +173 -0
- infrahub/graphql/resolvers/resolver.py +2 -0
- infrahub/graphql/schema.py +8 -1
- infrahub/groups/tasks.py +1 -1
- infrahub/hfid/__init__.py +0 -0
- infrahub/hfid/gather.py +48 -0
- infrahub/hfid/models.py +240 -0
- infrahub/hfid/tasks.py +185 -0
- infrahub/hfid/triggers.py +22 -0
- infrahub/lock.py +67 -30
- infrahub/locks/__init__.py +0 -0
- infrahub/locks/tasks.py +37 -0
- infrahub/middleware.py +26 -1
- infrahub/patch/plan_writer.py +2 -2
- infrahub/profiles/__init__.py +0 -0
- infrahub/profiles/node_applier.py +101 -0
- infrahub/profiles/queries/__init__.py +0 -0
- infrahub/profiles/queries/get_profile_data.py +99 -0
- infrahub/profiles/tasks.py +63 -0
- infrahub/proposed_change/tasks.py +10 -1
- infrahub/repositories/__init__.py +0 -0
- infrahub/repositories/create_repository.py +113 -0
- infrahub/server.py +16 -3
- infrahub/services/__init__.py +8 -5
- infrahub/tasks/registry.py +6 -4
- infrahub/trigger/catalogue.py +4 -0
- infrahub/trigger/models.py +2 -0
- infrahub/trigger/tasks.py +3 -0
- infrahub/webhook/models.py +1 -1
- infrahub/workflows/catalogue.py +110 -3
- infrahub/workflows/initialization.py +16 -0
- infrahub/workflows/models.py +17 -2
- infrahub_sdk/branch.py +5 -8
- infrahub_sdk/checks.py +1 -1
- infrahub_sdk/client.py +364 -84
- infrahub_sdk/convert_object_type.py +61 -0
- infrahub_sdk/ctl/check.py +2 -3
- infrahub_sdk/ctl/cli_commands.py +18 -12
- infrahub_sdk/ctl/config.py +8 -2
- infrahub_sdk/ctl/generator.py +6 -3
- infrahub_sdk/ctl/graphql.py +184 -0
- infrahub_sdk/ctl/repository.py +39 -1
- infrahub_sdk/ctl/schema.py +18 -3
- infrahub_sdk/ctl/utils.py +4 -0
- infrahub_sdk/ctl/validate.py +5 -3
- infrahub_sdk/diff.py +4 -5
- infrahub_sdk/exceptions.py +2 -0
- infrahub_sdk/generator.py +7 -1
- infrahub_sdk/graphql/__init__.py +12 -0
- infrahub_sdk/graphql/constants.py +1 -0
- infrahub_sdk/graphql/plugin.py +85 -0
- infrahub_sdk/graphql/query.py +77 -0
- infrahub_sdk/{graphql.py → graphql/renderers.py} +88 -75
- infrahub_sdk/graphql/utils.py +40 -0
- infrahub_sdk/node/attribute.py +2 -0
- infrahub_sdk/node/node.py +28 -20
- infrahub_sdk/playback.py +1 -2
- infrahub_sdk/protocols.py +54 -6
- infrahub_sdk/pytest_plugin/plugin.py +7 -4
- infrahub_sdk/pytest_plugin/utils.py +40 -0
- infrahub_sdk/repository.py +1 -2
- infrahub_sdk/schema/__init__.py +38 -0
- infrahub_sdk/schema/main.py +1 -0
- infrahub_sdk/schema/repository.py +8 -0
- infrahub_sdk/spec/object.py +120 -7
- infrahub_sdk/spec/range_expansion.py +118 -0
- infrahub_sdk/timestamp.py +18 -6
- infrahub_sdk/transforms.py +1 -1
- {infrahub_server-1.4.10.dist-info → infrahub_server-1.5.0b1.dist-info}/METADATA +9 -11
- {infrahub_server-1.4.10.dist-info → infrahub_server-1.5.0b1.dist-info}/RECORD +177 -134
- infrahub_testcontainers/container.py +1 -1
- infrahub_testcontainers/docker-compose-cluster.test.yml +1 -1
- infrahub_testcontainers/docker-compose.test.yml +1 -1
- infrahub_testcontainers/models.py +2 -2
- infrahub_testcontainers/performance_test.py +4 -4
- infrahub/core/convert_object_type/conversion.py +0 -134
- {infrahub_server-1.4.10.dist-info → infrahub_server-1.5.0b1.dist-info}/LICENSE.txt +0 -0
- {infrahub_server-1.4.10.dist-info → infrahub_server-1.5.0b1.dist-info}/WHEEL +0 -0
- {infrahub_server-1.4.10.dist-info → infrahub_server-1.5.0b1.dist-info}/entry_points.txt +0 -0
infrahub/hfid/tasks.py
ADDED
|
@@ -0,0 +1,185 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import cast
|
|
4
|
+
|
|
5
|
+
from prefect import flow
|
|
6
|
+
from prefect.logging import get_run_logger
|
|
7
|
+
|
|
8
|
+
from infrahub.context import InfrahubContext # noqa: TC001 needed for prefect flow
|
|
9
|
+
from infrahub.core.registry import registry
|
|
10
|
+
from infrahub.events import BranchDeletedEvent
|
|
11
|
+
from infrahub.trigger.models import TriggerSetupReport, TriggerType
|
|
12
|
+
from infrahub.trigger.setup import setup_triggers_specific
|
|
13
|
+
from infrahub.workers.dependencies import get_client, get_component, get_database, get_workflow
|
|
14
|
+
from infrahub.workflows.catalogue import HFID_PROCESS, TRIGGER_UPDATE_HFID
|
|
15
|
+
from infrahub.workflows.utils import add_tags, wait_for_schema_to_converge
|
|
16
|
+
|
|
17
|
+
from .gather import gather_trigger_hfid
|
|
18
|
+
from .models import HFIDGraphQL, HFIDGraphQLResponse, HFIDTriggerDefinition
|
|
19
|
+
|
|
20
|
+
UPDATE_HFID = """
|
|
21
|
+
mutation UpdateHFID(
|
|
22
|
+
$id: String!,
|
|
23
|
+
$kind: String!,
|
|
24
|
+
$value: [String!]!
|
|
25
|
+
) {
|
|
26
|
+
InfrahubUpdateHFID(
|
|
27
|
+
data: {id: $id, value: $value, kind: $kind}
|
|
28
|
+
) {
|
|
29
|
+
ok
|
|
30
|
+
}
|
|
31
|
+
}
|
|
32
|
+
"""
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
@flow(
|
|
36
|
+
name="hfid-update-value",
|
|
37
|
+
flow_run_name="Update value for hfid on {node_kind}",
|
|
38
|
+
)
|
|
39
|
+
async def hfid_update_value(
|
|
40
|
+
branch_name: str,
|
|
41
|
+
obj: HFIDGraphQLResponse,
|
|
42
|
+
node_kind: str,
|
|
43
|
+
hfid_definition: list[str],
|
|
44
|
+
) -> None:
|
|
45
|
+
log = get_run_logger()
|
|
46
|
+
client = get_client()
|
|
47
|
+
|
|
48
|
+
await add_tags(branches=[branch_name], nodes=[obj.node_id], db_change=True)
|
|
49
|
+
|
|
50
|
+
rendered_hfid: list[str] = []
|
|
51
|
+
for hfid_component in hfid_definition:
|
|
52
|
+
if hfid_component in obj.variables:
|
|
53
|
+
rendered_hfid.append(obj.variables[hfid_component])
|
|
54
|
+
# value = await template.render(variables=obj.variables)
|
|
55
|
+
if rendered_hfid == obj.hfid_value:
|
|
56
|
+
log.debug(f"Ignoring to update {obj} with existing value on human_friendly_id={obj.hfid_value}")
|
|
57
|
+
return
|
|
58
|
+
|
|
59
|
+
await client.execute_graphql(
|
|
60
|
+
query=UPDATE_HFID,
|
|
61
|
+
variables={"id": obj.node_id, "kind": node_kind, "value": rendered_hfid},
|
|
62
|
+
branch_name=branch_name,
|
|
63
|
+
)
|
|
64
|
+
log.info(f"Updating {node_kind}.human_friendly_id='{rendered_hfid}' ({obj.node_id})")
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
@flow(
|
|
68
|
+
name="hfid-process",
|
|
69
|
+
flow_run_name="Process human friendly ids for {target_kind}",
|
|
70
|
+
)
|
|
71
|
+
async def process_hfid(
|
|
72
|
+
branch_name: str,
|
|
73
|
+
node_kind: str,
|
|
74
|
+
object_id: str,
|
|
75
|
+
target_kind: str,
|
|
76
|
+
context: InfrahubContext, # noqa: ARG001
|
|
77
|
+
) -> None:
|
|
78
|
+
log = get_run_logger()
|
|
79
|
+
client = get_client()
|
|
80
|
+
|
|
81
|
+
await add_tags(branches=[branch_name])
|
|
82
|
+
|
|
83
|
+
target_schema = branch_name if branch_name in registry.get_altered_schema_branches() else registry.default_branch
|
|
84
|
+
schema_branch = registry.schema.get_schema_branch(name=target_schema)
|
|
85
|
+
node_schema = schema_branch.get_node(name=target_kind, duplicate=False)
|
|
86
|
+
|
|
87
|
+
if node_kind == target_kind:
|
|
88
|
+
hfid_definition = schema_branch.hfids.get_node_definition(kind=node_kind)
|
|
89
|
+
else:
|
|
90
|
+
hfid_definition = schema_branch.hfids.get_related_definition(related_kind=node_kind, target_kind=target_kind)
|
|
91
|
+
|
|
92
|
+
# jinja_template = Jinja2Template(template=display_label_template.template)
|
|
93
|
+
# variables = jinja_template.get_variables()
|
|
94
|
+
hfid_graphql = HFIDGraphQL(
|
|
95
|
+
node_schema=node_schema, variables=hfid_definition.hfid, filter_key=hfid_definition.filter_key
|
|
96
|
+
)
|
|
97
|
+
|
|
98
|
+
query = hfid_graphql.render_graphql_query(filter_id=object_id)
|
|
99
|
+
response = await client.execute_graphql(query=query, branch_name=branch_name)
|
|
100
|
+
update_candidates = hfid_graphql.parse_response(response=response)
|
|
101
|
+
|
|
102
|
+
if not update_candidates:
|
|
103
|
+
log.debug("No nodes found that requires updates")
|
|
104
|
+
return
|
|
105
|
+
|
|
106
|
+
batch = await client.create_batch()
|
|
107
|
+
for node in update_candidates:
|
|
108
|
+
batch.add(
|
|
109
|
+
task=hfid_update_value,
|
|
110
|
+
branch_name=branch_name,
|
|
111
|
+
obj=node,
|
|
112
|
+
node_kind=node_schema.kind,
|
|
113
|
+
hfid_definition=hfid_definition.hfid,
|
|
114
|
+
)
|
|
115
|
+
|
|
116
|
+
_ = [response async for _, response in batch.execute()]
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
@flow(name="hfid-setup", flow_run_name="Setup human friendly ids in task-manager")
|
|
120
|
+
async def hfid_setup(context: InfrahubContext, branch_name: str | None = None, event_name: str | None = None) -> None:
|
|
121
|
+
database = await get_database()
|
|
122
|
+
async with database.start_session() as db:
|
|
123
|
+
log = get_run_logger()
|
|
124
|
+
|
|
125
|
+
if branch_name:
|
|
126
|
+
await add_tags(branches=[branch_name])
|
|
127
|
+
component = await get_component()
|
|
128
|
+
await wait_for_schema_to_converge(branch_name=branch_name, component=component, db=db, log=log)
|
|
129
|
+
|
|
130
|
+
report: TriggerSetupReport = await setup_triggers_specific(
|
|
131
|
+
gatherer=gather_trigger_hfid, trigger_type=TriggerType.HUMAN_FRIENDLY_ID
|
|
132
|
+
) # type: ignore[misc]
|
|
133
|
+
|
|
134
|
+
# Configure all DisplayLabelTriggerDefinitions in Prefect
|
|
135
|
+
hfid_reports = [cast(HFIDTriggerDefinition, entry) for entry in report.updated + report.created]
|
|
136
|
+
direct_target_triggers = [hfid_report for hfid_report in hfid_reports if hfid_report.target_kind]
|
|
137
|
+
|
|
138
|
+
for display_report in direct_target_triggers:
|
|
139
|
+
if event_name != BranchDeletedEvent.event_name and display_report.branch == branch_name:
|
|
140
|
+
await get_workflow().submit_workflow(
|
|
141
|
+
workflow=TRIGGER_UPDATE_HFID,
|
|
142
|
+
context=context,
|
|
143
|
+
parameters={
|
|
144
|
+
"branch_name": display_report.branch,
|
|
145
|
+
"kind": display_report.target_kind,
|
|
146
|
+
},
|
|
147
|
+
)
|
|
148
|
+
|
|
149
|
+
log.info(f"{report.in_use_count} HFID automation configurations completed")
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
@flow(
|
|
153
|
+
name="trigger-update-hfid",
|
|
154
|
+
flow_run_name="Trigger updates for display labels for kind",
|
|
155
|
+
)
|
|
156
|
+
async def trigger_update_hfid(
|
|
157
|
+
branch_name: str,
|
|
158
|
+
kind: str,
|
|
159
|
+
context: InfrahubContext,
|
|
160
|
+
) -> None:
|
|
161
|
+
await add_tags(branches=[branch_name])
|
|
162
|
+
|
|
163
|
+
client = get_client()
|
|
164
|
+
|
|
165
|
+
# NOTE we only need the id of the nodes, this query will still query for the HFID
|
|
166
|
+
node_schema = registry.schema.get_node_schema(name=kind, branch=branch_name)
|
|
167
|
+
nodes = await client.all(
|
|
168
|
+
kind=kind,
|
|
169
|
+
branch=branch_name,
|
|
170
|
+
exclude=node_schema.attribute_names + node_schema.relationship_names,
|
|
171
|
+
populate_store=False,
|
|
172
|
+
)
|
|
173
|
+
|
|
174
|
+
for node in nodes:
|
|
175
|
+
await get_workflow().submit_workflow(
|
|
176
|
+
workflow=HFID_PROCESS,
|
|
177
|
+
context=context,
|
|
178
|
+
parameters={
|
|
179
|
+
"branch_name": branch_name,
|
|
180
|
+
"node_kind": kind,
|
|
181
|
+
"target_kind": kind,
|
|
182
|
+
"object_id": node.id,
|
|
183
|
+
"context": context,
|
|
184
|
+
},
|
|
185
|
+
)
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
from infrahub.events.branch_action import BranchDeletedEvent
|
|
2
|
+
from infrahub.events.schema_action import SchemaUpdatedEvent
|
|
3
|
+
from infrahub.trigger.models import BuiltinTriggerDefinition, EventTrigger, ExecuteWorkflow
|
|
4
|
+
from infrahub.workflows.catalogue import HFID_SETUP
|
|
5
|
+
|
|
6
|
+
TRIGGER_HFID_ALL_SCHEMA = BuiltinTriggerDefinition(
|
|
7
|
+
name="hfid-setup-all",
|
|
8
|
+
trigger=EventTrigger(events={SchemaUpdatedEvent.event_name, BranchDeletedEvent.event_name}),
|
|
9
|
+
actions=[
|
|
10
|
+
ExecuteWorkflow(
|
|
11
|
+
workflow=HFID_SETUP,
|
|
12
|
+
parameters={
|
|
13
|
+
"branch_name": "{{ event.resource['infrahub.branch.name'] }}",
|
|
14
|
+
"event_name": "{{ event.event }}",
|
|
15
|
+
"context": {
|
|
16
|
+
"__prefect_kind": "json",
|
|
17
|
+
"value": {"__prefect_kind": "jinja", "template": "{{ event.payload['context'] | tojson }}"},
|
|
18
|
+
},
|
|
19
|
+
},
|
|
20
|
+
),
|
|
21
|
+
],
|
|
22
|
+
)
|
infrahub/lock.py
CHANGED
|
@@ -25,6 +25,7 @@ registry: InfrahubLockRegistry = None
|
|
|
25
25
|
|
|
26
26
|
|
|
27
27
|
METRIC_PREFIX = "infrahub_lock"
|
|
28
|
+
LOCK_PREFIX = "lock"
|
|
28
29
|
|
|
29
30
|
LOCK_ACQUIRE_TIME_METRICS = Histogram(
|
|
30
31
|
f"{METRIC_PREFIX}_acquire_seconds",
|
|
@@ -97,10 +98,10 @@ class NATSLock:
|
|
|
97
98
|
while True:
|
|
98
99
|
if await self.do_acquire(token):
|
|
99
100
|
self.token = token
|
|
100
|
-
return
|
|
101
|
+
return
|
|
101
102
|
await sleep(0.1) # default Redis GlobalLock value
|
|
102
103
|
|
|
103
|
-
async def do_acquire(self, token: str) -> bool:
|
|
104
|
+
async def do_acquire(self, token: str) -> bool | None:
|
|
104
105
|
return await self.service.cache.set(key=self.name, value=token, not_exists=True)
|
|
105
106
|
|
|
106
107
|
async def release(self) -> None:
|
|
@@ -123,14 +124,14 @@ class InfrahubLock:
|
|
|
123
124
|
local: bool | None = None,
|
|
124
125
|
in_multi: bool = False,
|
|
125
126
|
) -> None:
|
|
126
|
-
self.use_local: bool = local
|
|
127
|
+
self.use_local: bool | None = local
|
|
127
128
|
self.local: LocalLock = None
|
|
128
129
|
self.remote: GlobalLock = None
|
|
129
130
|
self.name: str = name
|
|
130
131
|
self.connection: redis.Redis | None = connection
|
|
131
132
|
self.in_multi: bool = in_multi
|
|
132
133
|
self.lock_type: str = "multi" if self.in_multi else "individual"
|
|
133
|
-
self.
|
|
134
|
+
self._acquire_time: int | None = None
|
|
134
135
|
self.event = asyncio.Event()
|
|
135
136
|
|
|
136
137
|
if not self.connection or (self.use_local is None and name.startswith("local.")):
|
|
@@ -139,9 +140,20 @@ class InfrahubLock:
|
|
|
139
140
|
if self.use_local:
|
|
140
141
|
self.local = LocalLock()
|
|
141
142
|
elif config.SETTINGS.cache.driver == config.CacheDriver.Redis:
|
|
142
|
-
self.remote = GlobalLock(redis=self.connection, name=f"
|
|
143
|
+
self.remote = GlobalLock(redis=self.connection, name=f"{LOCK_PREFIX}.{self.name}")
|
|
143
144
|
else:
|
|
144
|
-
self.remote = NATSLock(service=self.connection, name=f"
|
|
145
|
+
self.remote = NATSLock(service=self.connection, name=f"{LOCK_PREFIX}.{self.name}")
|
|
146
|
+
|
|
147
|
+
@property
|
|
148
|
+
def acquire_time(self) -> int:
|
|
149
|
+
if self._acquire_time is not None:
|
|
150
|
+
return self._acquire_time
|
|
151
|
+
|
|
152
|
+
raise ValueError("The lock has not been initialized")
|
|
153
|
+
|
|
154
|
+
@acquire_time.setter
|
|
155
|
+
def acquire_time(self, value: int) -> None:
|
|
156
|
+
self._acquire_time = value
|
|
145
157
|
|
|
146
158
|
async def __aenter__(self):
|
|
147
159
|
await self.acquire()
|
|
@@ -179,9 +191,54 @@ class InfrahubLock:
|
|
|
179
191
|
return self.local.locked()
|
|
180
192
|
|
|
181
193
|
|
|
194
|
+
class LockNameGenerator:
|
|
195
|
+
local = "local"
|
|
196
|
+
_global = "global"
|
|
197
|
+
|
|
198
|
+
def generate_name(self, name: str, namespace: str | None = None, local: bool | None = None) -> str:
|
|
199
|
+
if namespace is None and local is None:
|
|
200
|
+
return name
|
|
201
|
+
|
|
202
|
+
new_name = ""
|
|
203
|
+
if local is True:
|
|
204
|
+
new_name = f"{self.local}."
|
|
205
|
+
elif local is False:
|
|
206
|
+
new_name = f"{self._global}."
|
|
207
|
+
|
|
208
|
+
if namespace is not None:
|
|
209
|
+
new_name += f"{namespace}."
|
|
210
|
+
new_name += name
|
|
211
|
+
|
|
212
|
+
return new_name
|
|
213
|
+
|
|
214
|
+
def unpack_name(self, name: str) -> tuple[str, str | None, bool | None]:
|
|
215
|
+
local = None
|
|
216
|
+
namespace = None
|
|
217
|
+
|
|
218
|
+
parts = name.split(".")
|
|
219
|
+
if parts[0] == self.local:
|
|
220
|
+
local = True
|
|
221
|
+
parts = parts[1:]
|
|
222
|
+
elif parts[0] == self._global:
|
|
223
|
+
local = False
|
|
224
|
+
parts = parts[1:]
|
|
225
|
+
|
|
226
|
+
if len(parts) > 1:
|
|
227
|
+
namespace = parts[0]
|
|
228
|
+
original_name = ".".join(parts[1:])
|
|
229
|
+
else:
|
|
230
|
+
original_name = parts[0]
|
|
231
|
+
|
|
232
|
+
return original_name, namespace, local
|
|
233
|
+
|
|
234
|
+
|
|
182
235
|
class InfrahubLockRegistry:
|
|
183
236
|
def __init__(
|
|
184
|
-
self,
|
|
237
|
+
self,
|
|
238
|
+
token: str | None = None,
|
|
239
|
+
local_only: bool = False,
|
|
240
|
+
service: InfrahubServices | None = None,
|
|
241
|
+
name_generator: LockNameGenerator | None = None,
|
|
185
242
|
) -> None:
|
|
186
243
|
if config.SETTINGS.cache.enable and not local_only:
|
|
187
244
|
if config.SETTINGS.cache.driver == config.CacheDriver.Redis:
|
|
@@ -201,23 +258,7 @@ class InfrahubLockRegistry:
|
|
|
201
258
|
|
|
202
259
|
self.token = token or str(uuid.uuid4())
|
|
203
260
|
self.locks: dict[str, InfrahubLock] = {}
|
|
204
|
-
|
|
205
|
-
@classmethod
|
|
206
|
-
def _generate_name(cls, name: str, namespace: str | None = None, local: bool | None = None) -> str:
|
|
207
|
-
if namespace is None and local is None:
|
|
208
|
-
return name
|
|
209
|
-
|
|
210
|
-
new_name = ""
|
|
211
|
-
if local is True:
|
|
212
|
-
new_name = "local."
|
|
213
|
-
elif local is False:
|
|
214
|
-
new_name = "global."
|
|
215
|
-
|
|
216
|
-
if namespace is not None:
|
|
217
|
-
new_name += f"{namespace}."
|
|
218
|
-
new_name += name
|
|
219
|
-
|
|
220
|
-
return new_name
|
|
261
|
+
self.name_generator = name_generator or LockNameGenerator()
|
|
221
262
|
|
|
222
263
|
def get_existing(
|
|
223
264
|
self,
|
|
@@ -225,7 +266,7 @@ class InfrahubLockRegistry:
|
|
|
225
266
|
namespace: str | None,
|
|
226
267
|
local: bool | None = None,
|
|
227
268
|
) -> InfrahubLock | None:
|
|
228
|
-
lock_name = self.
|
|
269
|
+
lock_name = self.name_generator.generate_name(name=name, namespace=namespace, local=local)
|
|
229
270
|
if lock_name not in self.locks:
|
|
230
271
|
return None
|
|
231
272
|
return self.locks[lock_name]
|
|
@@ -233,7 +274,7 @@ class InfrahubLockRegistry:
|
|
|
233
274
|
def get(
|
|
234
275
|
self, name: str, namespace: str | None = None, local: bool | None = None, in_multi: bool = False
|
|
235
276
|
) -> InfrahubLock:
|
|
236
|
-
lock_name = self.
|
|
277
|
+
lock_name = self.name_generator.generate_name(name=name, namespace=namespace, local=local)
|
|
237
278
|
if lock_name not in self.locks:
|
|
238
279
|
self.locks[lock_name] = InfrahubLock(name=lock_name, connection=self.connection, in_multi=in_multi)
|
|
239
280
|
return self.locks[lock_name]
|
|
@@ -257,7 +298,3 @@ class InfrahubLockRegistry:
|
|
|
257
298
|
def initialize_lock(local_only: bool = False, service: InfrahubServices | None = None) -> None:
|
|
258
299
|
global registry
|
|
259
300
|
registry = InfrahubLockRegistry(local_only=local_only, service=service)
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
def build_object_lock_name(name: str) -> str:
|
|
263
|
-
return f"global.object.{name}"
|
|
File without changes
|
infrahub/locks/tasks.py
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from prefect import flow
|
|
4
|
+
from prefect.logging import get_run_logger
|
|
5
|
+
|
|
6
|
+
from infrahub import config
|
|
7
|
+
from infrahub.core.registry import registry
|
|
8
|
+
from infrahub.core.timestamp import Timestamp
|
|
9
|
+
from infrahub.lock import LOCK_PREFIX
|
|
10
|
+
from infrahub.services import InfrahubServices # noqa: TC001 needed for prefect flow
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
@flow(
|
|
14
|
+
name="clean-up-deadlocks",
|
|
15
|
+
flow_run_name="Clean up deadlocks",
|
|
16
|
+
)
|
|
17
|
+
async def clean_up_deadlocks(service: InfrahubServices) -> None:
|
|
18
|
+
"""Remove stale distributed locks left behind by inactive workers"""
|
|
19
|
+
keys = await service.cache.list_keys(filter_pattern=f"{LOCK_PREFIX}*")
|
|
20
|
+
if not keys:
|
|
21
|
+
return
|
|
22
|
+
|
|
23
|
+
log = get_run_logger()
|
|
24
|
+
values = await service.cache.get_values(keys=keys)
|
|
25
|
+
workers = await service.component.list_workers(branch=registry.default_branch, schema_hash=False)
|
|
26
|
+
workers_active = {worker.id for worker in workers if worker.active}
|
|
27
|
+
|
|
28
|
+
for key, value in zip(keys, values, strict=False):
|
|
29
|
+
if not key or not value:
|
|
30
|
+
continue
|
|
31
|
+
|
|
32
|
+
timestamp, worker_id = value.split("::", 1)
|
|
33
|
+
if worker_id not in workers_active and Timestamp() > Timestamp(timestamp).add(
|
|
34
|
+
minutes=config.SETTINGS.cache.clean_up_deadlocks_interval_mins
|
|
35
|
+
):
|
|
36
|
+
await service.cache.delete(key)
|
|
37
|
+
log.info(f"Deleted deadlock key={key} worker={worker_id}")
|
infrahub/middleware.py
CHANGED
|
@@ -1,7 +1,8 @@
|
|
|
1
1
|
from typing import Any
|
|
2
2
|
|
|
3
|
+
from fastapi.middleware.gzip import GZipMiddleware
|
|
3
4
|
from starlette.middleware.cors import CORSMiddleware
|
|
4
|
-
from starlette.types import ASGIApp
|
|
5
|
+
from starlette.types import ASGIApp, Receive, Scope, Send
|
|
5
6
|
|
|
6
7
|
from infrahub import config
|
|
7
8
|
|
|
@@ -15,3 +16,27 @@ class InfrahubCORSMiddleware(CORSMiddleware):
|
|
|
15
16
|
kwargs["allow_headers"] = config.SETTINGS.api.cors_allow_headers
|
|
16
17
|
|
|
17
18
|
super().__init__(app, *args, **kwargs)
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class ConditionalGZipMiddleware(GZipMiddleware):
|
|
22
|
+
def __init__(
|
|
23
|
+
self,
|
|
24
|
+
app: ASGIApp,
|
|
25
|
+
*,
|
|
26
|
+
minimum_size: int = 500,
|
|
27
|
+
compresslevel: int = 9,
|
|
28
|
+
include_paths: tuple[str, ...] = (),
|
|
29
|
+
) -> None:
|
|
30
|
+
super().__init__(app, minimum_size=minimum_size, compresslevel=compresslevel)
|
|
31
|
+
self.include_paths = include_paths
|
|
32
|
+
|
|
33
|
+
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: # type: ignore[override]
|
|
34
|
+
if scope["type"] != "http":
|
|
35
|
+
await self.app(scope, receive, send)
|
|
36
|
+
return
|
|
37
|
+
|
|
38
|
+
path = scope.get("path", "")
|
|
39
|
+
if any(path.startswith(include) for include in self.include_paths):
|
|
40
|
+
await super().__call__(scope, receive, send)
|
|
41
|
+
else:
|
|
42
|
+
await self.app(scope, receive, send)
|
infrahub/patch/plan_writer.py
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
import json
|
|
2
2
|
from dataclasses import asdict
|
|
3
|
-
from datetime import
|
|
3
|
+
from datetime import UTC, datetime
|
|
4
4
|
from pathlib import Path
|
|
5
5
|
from typing import Any
|
|
6
6
|
|
|
@@ -10,7 +10,7 @@ from .models import EdgeToAdd, EdgeToDelete, EdgeToUpdate, PatchPlan, VertexToAd
|
|
|
10
10
|
|
|
11
11
|
class PatchPlanWriter:
|
|
12
12
|
def write(self, patches_directory: Path, patch_plan: PatchPlan) -> Path:
|
|
13
|
-
timestamp_str = datetime.now(tz=
|
|
13
|
+
timestamp_str = datetime.now(tz=UTC).strftime("%Y%m%d-%H%M%S")
|
|
14
14
|
patch_name = f"patch-{patch_plan.name}-{timestamp_str}"
|
|
15
15
|
patch_plan_directory = patches_directory / Path(patch_name)
|
|
16
16
|
if not patch_plan_directory.exists():
|
|
File without changes
|
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
from typing import Any
|
|
2
|
+
|
|
3
|
+
from infrahub.core.attribute import BaseAttribute
|
|
4
|
+
from infrahub.core.branch import Branch
|
|
5
|
+
from infrahub.core.node import Node
|
|
6
|
+
from infrahub.database import InfrahubDatabase
|
|
7
|
+
|
|
8
|
+
from .queries.get_profile_data import GetProfileDataQuery, ProfileData
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class NodeProfilesApplier:
|
|
12
|
+
def __init__(self, db: InfrahubDatabase, branch: Branch):
|
|
13
|
+
self.db = db
|
|
14
|
+
self.branch = branch
|
|
15
|
+
|
|
16
|
+
async def _get_profile_ids(self, node: Node) -> list[str]:
|
|
17
|
+
try:
|
|
18
|
+
profiles_rel = node.get_relationship("profiles")
|
|
19
|
+
except ValueError:
|
|
20
|
+
return []
|
|
21
|
+
profile_rels = await profiles_rel.get_relationships(db=self.db)
|
|
22
|
+
return [pr.peer_id for pr in profile_rels if pr.peer_id]
|
|
23
|
+
|
|
24
|
+
async def _get_attr_names_for_profiles(self, node: Node) -> list[str]:
|
|
25
|
+
node_schema = node.get_schema()
|
|
26
|
+
|
|
27
|
+
# get the names of attributes that could be affected by profile changes
|
|
28
|
+
attr_names_for_profiles: list[str] = []
|
|
29
|
+
for attr_schema in node_schema.attributes:
|
|
30
|
+
attr_name = attr_schema.name
|
|
31
|
+
node_attr: BaseAttribute = getattr(node, attr_name)
|
|
32
|
+
if node_attr.is_from_profile or node_attr.is_default:
|
|
33
|
+
attr_names_for_profiles.append(attr_name)
|
|
34
|
+
return attr_names_for_profiles
|
|
35
|
+
|
|
36
|
+
async def _get_sorted_profile_data(
|
|
37
|
+
self, profile_ids: list[str], attr_names_for_profiles: list[str]
|
|
38
|
+
) -> list[ProfileData]:
|
|
39
|
+
if not profile_ids:
|
|
40
|
+
return []
|
|
41
|
+
query = await GetProfileDataQuery.init(
|
|
42
|
+
db=self.db, branch=self.branch, profile_ids=profile_ids, attr_names=attr_names_for_profiles
|
|
43
|
+
)
|
|
44
|
+
await query.execute(db=self.db)
|
|
45
|
+
profile_data_list = query.get_profile_data()
|
|
46
|
+
return sorted(profile_data_list, key=lambda x: (x.priority, x.uuid))
|
|
47
|
+
|
|
48
|
+
def _apply_profile_to_attribute(self, node_attr: BaseAttribute, profile_value: Any, profile_id: str) -> bool:
|
|
49
|
+
is_changed = False
|
|
50
|
+
if node_attr.value != profile_value:
|
|
51
|
+
node_attr.value = profile_value
|
|
52
|
+
is_changed = True
|
|
53
|
+
if node_attr.is_default is not False:
|
|
54
|
+
node_attr.is_default = False
|
|
55
|
+
is_changed = True
|
|
56
|
+
if node_attr.is_from_profile is not True:
|
|
57
|
+
node_attr.is_from_profile = True
|
|
58
|
+
is_changed = True
|
|
59
|
+
if node_attr.source_id != profile_id: # type: ignore[attr-defined]
|
|
60
|
+
node_attr.set_source(value=profile_id)
|
|
61
|
+
is_changed = True
|
|
62
|
+
return is_changed
|
|
63
|
+
|
|
64
|
+
def _remove_profile_from_attribute(self, node_attr: BaseAttribute) -> None:
|
|
65
|
+
node_attr.clear_source()
|
|
66
|
+
node_attr.value = node_attr.schema.default_value
|
|
67
|
+
node_attr.is_default = True
|
|
68
|
+
node_attr.is_from_profile = False
|
|
69
|
+
|
|
70
|
+
async def apply_profiles(self, node: Node) -> list[str]:
|
|
71
|
+
profile_ids = await self._get_profile_ids(node=node)
|
|
72
|
+
attr_names_for_profiles = await self._get_attr_names_for_profiles(node=node)
|
|
73
|
+
|
|
74
|
+
if not attr_names_for_profiles:
|
|
75
|
+
return []
|
|
76
|
+
|
|
77
|
+
# get profiles priorities and attribute values on branch
|
|
78
|
+
sorted_profile_data = await self._get_sorted_profile_data(
|
|
79
|
+
profile_ids=profile_ids, attr_names_for_profiles=attr_names_for_profiles
|
|
80
|
+
)
|
|
81
|
+
|
|
82
|
+
updated_field_names = []
|
|
83
|
+
# set attribute values/is_default/is_from_profile on nodes
|
|
84
|
+
for attr_name in attr_names_for_profiles:
|
|
85
|
+
has_profile_data = False
|
|
86
|
+
node_attr = node.get_attribute(attr_name)
|
|
87
|
+
for profile_data in sorted_profile_data:
|
|
88
|
+
profile_value = profile_data.attribute_values.get(attr_name)
|
|
89
|
+
if profile_value is not None:
|
|
90
|
+
has_profile_data = True
|
|
91
|
+
is_changed = False
|
|
92
|
+
is_changed = self._apply_profile_to_attribute(
|
|
93
|
+
node_attr=node_attr, profile_value=profile_value, profile_id=profile_data.uuid
|
|
94
|
+
)
|
|
95
|
+
if is_changed:
|
|
96
|
+
updated_field_names.append(attr_name)
|
|
97
|
+
break
|
|
98
|
+
if not has_profile_data and node_attr.is_from_profile:
|
|
99
|
+
self._remove_profile_from_attribute(node_attr=node_attr)
|
|
100
|
+
updated_field_names.append(attr_name)
|
|
101
|
+
return updated_field_names
|
|
File without changes
|
|
@@ -0,0 +1,99 @@
|
|
|
1
|
+
from dataclasses import dataclass
|
|
2
|
+
from typing import Any
|
|
3
|
+
|
|
4
|
+
from infrahub.core.constants import NULL_VALUE
|
|
5
|
+
from infrahub.core.query import Query, QueryType
|
|
6
|
+
from infrahub.database import InfrahubDatabase
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
@dataclass
class ProfileData:
    """Profile node data gathered by GetProfileDataQuery: identity, priority, and attribute values."""

    # UUID of the profile node in the graph.
    uuid: str
    # Profile priority; initialized to float("inf") until a "profile_priority" attribute is read.
    # NOTE(review): get_profile_data can assign None here when the stored priority is NULL —
    # confirm whether the annotation should include None.
    priority: float | int
    # Mapping of attribute name -> value defined by this profile (NULL values normalized to None).
    attribute_values: dict[str, Any]
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class GetProfileDataQuery(Query):
    """Read query returning, per active profile, its priority and attribute values on the branch."""

    type: QueryType = QueryType.READ
    insert_return: bool = False

    def __init__(self, *args: Any, profile_ids: list[str], attr_names: list[str], **kwargs: Any):
        """Store the profile UUIDs and attribute names to query for.

        Args:
            profile_ids: UUIDs of the profile nodes to fetch.
            attr_names: Attribute names to retrieve ("profile_priority" is added automatically).
        """
        super().__init__(*args, **kwargs)
        self.profile_ids = profile_ids
        self.attr_names = attr_names

    async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:  # noqa: ARG002
        """Build the Cypher query and parameters for the current branch/time."""
        branch_filter, branch_params = self.branch.get_query_filter_path(at=self.at.to_string())
        self.params.update(branch_params)
        self.params["profile_ids"] = self.profile_ids
        # "profile_priority" is always fetched so results can be ordered by priority downstream.
        self.params["attr_names"] = self.attr_names + ["profile_priority"]

        # NOTE(review): in the first and third CALL subqueries below, the following
        # `WITH` does not carry `is_active` before the `WHERE is_active = TRUE` filter
        # (the second one does carry it) — confirm this parses on the target database.
        # NOTE(review): each ordered CALL subquery RETURNs without `LIMIT 1`; presumably
        # only the latest edge's status is wanted — confirm intended.
        query = """
        // --------------
        // get the Profile nodes
        // --------------
        MATCH (profile:Node)
        WHERE profile.uuid IN $profile_ids
        // --------------
        // make sure we only use the active ones
        // --------------
        CALL (profile) {
            MATCH (profile)-[r:IS_PART_OF]->(:Root)
            WHERE %(branch_filter)s
            ORDER BY r.branch_level DESC, r.from DESC, r.status ASC
            RETURN r.status = "active" AS is_active
        }
        WITH profile
        WHERE is_active = TRUE
        // --------------
        // get the attributes that we care about
        // --------------
        MATCH (profile)-[:HAS_ATTRIBUTE]-(attr:Attribute)
        WHERE attr.name IN $attr_names
        WITH DISTINCT profile, attr
        CALL (profile, attr) {
            MATCH (profile)-[r:HAS_ATTRIBUTE]->(attr)
            WHERE %(branch_filter)s
            ORDER BY r.branch_level DESC, r.from DESC, r.status ASC
            RETURN r.status = "active" AS is_active
        }
        WITH profile, attr, is_active
        WHERE is_active = TRUE
        // --------------
        // get the attribute values
        // --------------
        MATCH (attr)-[:HAS_VALUE]->(av:AttributeValue)
        WITH DISTINCT profile, attr, av
        CALL (attr, av) {
            MATCH (attr)-[r:HAS_VALUE]->(av)
            WHERE %(branch_filter)s
            ORDER BY r.branch_level DESC, r.from DESC, r.status ASC
            RETURN r.status = "active" AS is_active
        }
        WITH profile, attr, av
        WHERE is_active = TRUE
        RETURN profile.uuid AS profile_uuid, attr.name AS attr_name, av.value AS attr_value
        """ % {"branch_filter": branch_filter}
        self.add_to_query(query)
        self.return_labels = ["profile_uuid", "attr_name", "attr_value"]

    def get_profile_data(self) -> list[ProfileData]:
        """Group the query results into one ProfileData per profile UUID.

        NULL sentinel values are normalized to None; the "profile_priority"
        attribute is coerced to int and stored as the profile's priority,
        every other attribute goes into attribute_values.
        """
        profile_data_by_uuid: dict[str, ProfileData] = {}
        for result in self.results:
            profile_uuid = result.get_as_type(label="profile_uuid", return_type=str)
            if profile_uuid not in profile_data_by_uuid:
                # priority defaults to +inf so profiles missing a priority sort last
                profile_data_by_uuid[profile_uuid] = ProfileData(
                    uuid=profile_uuid, priority=float("inf"), attribute_values={}
                )
            profile_data = profile_data_by_uuid[profile_uuid]
            attr_name = result.get_as_type(label="attr_name", return_type=str)
            attr_value: Any = result.get(label="attr_value")
            # normalize the database NULL sentinel to a real None
            if attr_value == NULL_VALUE:
                attr_value = None
            if attr_name == "profile_priority":
                if attr_value is not None and not isinstance(attr_value, int):
                    attr_value = int(attr_value)
                # NOTE(review): a NULL priority overwrites the +inf default with None
                # here, despite ProfileData.priority being typed float | int — confirm.
                profile_data.priority = attr_value
            else:
                profile_data.attribute_values[attr_name] = attr_value
        return list(profile_data_by_uuid.values())
|