infrahub-server 1.4.12__py3-none-any.whl → 1.4.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- infrahub/cli/tasks.py +46 -0
- infrahub/core/diff/calculator.py +2 -2
- infrahub/core/diff/query/delete_query.py +9 -5
- infrahub/core/diff/query/merge.py +39 -23
- infrahub/core/migrations/graph/__init__.py +2 -0
- infrahub/core/migrations/graph/m041_deleted_dup_edges.py +149 -0
- infrahub/core/query/diff.py +61 -16
- infrahub/core/query/node.py +35 -33
- infrahub/git/base.py +4 -1
- infrahub/git/tasks.py +52 -2
- infrahub/graphql/analyzer.py +9 -0
- infrahub/message_bus/types.py +1 -0
- infrahub/proposed_change/tasks.py +43 -9
- infrahub/services/adapters/http/__init__.py +5 -0
- infrahub/services/adapters/workflow/worker.py +9 -1
- infrahub/task_manager/task.py +73 -0
- infrahub/workers/infrahub_async.py +5 -1
- infrahub/workflows/utils.py +2 -1
- {infrahub_server-1.4.12.dist-info → infrahub_server-1.4.13.dist-info}/METADATA +1 -1
- {infrahub_server-1.4.12.dist-info → infrahub_server-1.4.13.dist-info}/RECORD +23 -22
- {infrahub_server-1.4.12.dist-info → infrahub_server-1.4.13.dist-info}/LICENSE.txt +0 -0
- {infrahub_server-1.4.12.dist-info → infrahub_server-1.4.13.dist-info}/WHEEL +0 -0
- {infrahub_server-1.4.12.dist-info → infrahub_server-1.4.13.dist-info}/entry_points.txt +0 -0
infrahub/cli/tasks.py
CHANGED
@@ -3,9 +3,11 @@ import logging
 import typer
 from infrahub_sdk.async_typer import AsyncTyper
 from prefect.client.orchestration import get_client
+from prefect.client.schemas.objects import StateType

 from infrahub import config
 from infrahub.services.adapters.workflow.worker import WorkflowWorkerExecution
+from infrahub.task_manager.task import PrefectTask
 from infrahub.tasks.dummy import DUMMY_FLOW, DummyInput
 from infrahub.workflows.initialization import setup_task_manager
 from infrahub.workflows.models import WorkerPoolDefinition
@@ -50,3 +52,47 @@ async def execute(
         workflow=DUMMY_FLOW, parameters={"data": DummyInput(firstname="John", lastname="Doe")}
     )  # type: ignore[var-annotated]
     print(result)
+
+
+flush_app = AsyncTyper()
+
+app.add_typer(flush_app, name="flush")
+
+
+@flush_app.command()
+async def flow_runs(
+    ctx: typer.Context,  # noqa: ARG001
+    config_file: str = typer.Argument("infrahub.toml", envvar="INFRAHUB_CONFIG"),
+    days_to_keep: int = 30,
+    batch_size: int = 100,
+) -> None:
+    """Flush old task runs"""
+    logging.getLogger("infrahub").setLevel(logging.WARNING)
+    logging.getLogger("neo4j").setLevel(logging.ERROR)
+    logging.getLogger("prefect").setLevel(logging.ERROR)
+
+    config.load_and_exit(config_file_name=config_file)
+
+    await PrefectTask.delete_flow_runs(
+        days_to_keep=days_to_keep,
+        batch_size=batch_size,
+    )
+
+
+@flush_app.command()
+async def stale_runs(
+    ctx: typer.Context,  # noqa: ARG001
+    config_file: str = typer.Argument("infrahub.toml", envvar="INFRAHUB_CONFIG"),
+    days_to_keep: int = 2,
+    batch_size: int = 100,
+) -> None:
+    """Flush stale task runs"""
+    logging.getLogger("infrahub").setLevel(logging.WARNING)
+    logging.getLogger("neo4j").setLevel(logging.ERROR)
+    logging.getLogger("prefect").setLevel(logging.ERROR)
+
+    config.load_and_exit(config_file_name=config_file)
+
+    await PrefectTask.delete_flow_runs(
+        states=[StateType.RUNNING], delete=False, days_to_keep=days_to_keep, batch_size=batch_size
+    )
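The two new commands surface PrefectTask.delete_flow_runs through the CLI. Assuming the tasks app is mounted under the infrahub CLI the same way the existing commands are, they can be exercised with typer's test runner; the subcommand names follow typer's underscore-to-dash convention:

    # A minimal sketch; the `flush flow-runs` / `flush stale-runs` names and the
    # positional config path come from the diff above, the mounting is an assumption.
    from typer.testing import CliRunner

    from infrahub.cli.tasks import app

    runner = CliRunner()
    # Purge finished flow runs older than 30 days, 100 per batch.
    runner.invoke(app, ["flush", "flow-runs", "infrahub.toml", "--days-to-keep", "30"])
    # Mark RUNNING flow runs older than 2 days as crashed instead of deleting them.
    runner.invoke(app, ["flush", "stale-runs", "infrahub.toml"])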
infrahub/core/diff/calculator.py
CHANGED
@@ -141,8 +141,8 @@ class DiffCalculator:
             to_time=to_time,
             previous_node_field_specifiers=previous_node_specifiers,
         )
-        node_limit = int(config.SETTINGS.database.query_size_limit / 10)
-        fields_limit = int(config.SETTINGS.database.query_size_limit / 3)
+        node_limit = max(int(config.SETTINGS.database.query_size_limit / 10), 1)
+        fields_limit = max(int(config.SETTINGS.database.query_size_limit / 3), 1)
         properties_limit = config.SETTINGS.database.query_size_limit

         calculation_request = DiffCalculationRequest(
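The new max(..., 1) guard exists because int() truncates toward zero: with a small configured query_size_limit the computed page size could become 0, and a query with LIMIT 0 silently returns nothing. A one-line illustration:

    query_size_limit = 5                        # a deliberately small setting
    int(query_size_limit / 10)                  # 0 -> LIMIT 0, no rows returned
    max(int(query_size_limit / 10), 1)          # 1 -> at least one node per page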
infrahub/core/diff/query/delete_query.py
CHANGED
@@ -20,10 +20,14 @@ class EnrichedDiffDeleteQuery(Query):
         diff_filter = "WHERE d_root.uuid IN $diff_root_uuids"

         query = """
-…
+MATCH (d_root:DiffRoot)
+%(diff_filter)s
+OPTIONAL MATCH (d_root)-[*]->(diff_thing)
+WITH DISTINCT d_root, diff_thing
+ORDER BY elementId(diff_thing)
+CALL (diff_thing) {
+    DETACH DELETE diff_thing
+} IN TRANSACTIONS
+DETACH DELETE d_root
         """ % {"diff_filter": diff_filter}
         self.add_to_query(query=query)
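The rewritten delete walks everything reachable from each DiffRoot and removes it in server-side batches via CALL { ... } IN TRANSACTIONS, instead of building one giant transaction for large diffs. Note that Neo4j only allows this construct in an implicit (auto-commit) transaction; a standalone sketch with the standard Neo4j Python driver, with placeholder connection details:

    from neo4j import GraphDatabase

    driver = GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "password"))
    with driver.session() as session:
        # session.run() uses an auto-commit transaction, which CALL ... IN
        # TRANSACTIONS requires; it would be rejected inside execute_write().
        session.run(
            """
            MATCH (d_root:DiffRoot) WHERE d_root.uuid IN $diff_root_uuids
            OPTIONAL MATCH (d_root)-[*]->(diff_thing)
            WITH DISTINCT d_root, diff_thing
            ORDER BY elementId(diff_thing)
            CALL (diff_thing) { DETACH DELETE diff_thing } IN TRANSACTIONS
            DETACH DELETE d_root
            """,
            diff_root_uuids=["aaaa-bbbb"],  # placeholder UUIDs
        )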
infrahub/core/diff/query/merge.py
CHANGED
@@ -49,11 +49,18 @@ WITH node_diff_map, is_node_kind_migration, CASE
     WHEN is_node_kind_migration THEN $migrated_kinds_id_map[node_diff_map.uuid]
     ELSE NULL
 END AS node_db_id
+
+// ------------------------------
+// find the correct Node if the Node had its kind/inheritance migrated
+// and there are multiple Nodes with the same UUID
+// ------------------------------
 CALL (node_diff_map, node_db_id) {
-    MATCH (n:Node {uuid: node_diff_map.uuid})
-    WHERE node_db_id IS NULL
-…
+    MATCH (n:Node {uuid: node_diff_map.uuid})-[n_is_part_of:IS_PART_OF]->(:Root)
+    WHERE node_db_id IS NULL OR %(id_func)s(n) = node_db_id
+    AND n_is_part_of.branch IN [$source_branch, $target_branch]
     RETURN n
+    ORDER BY n_is_part_of.branch_level DESC, n_is_part_of.from DESC, n_is_part_of.status ASC
+    LIMIT 1
 }
 WITH n, node_diff_map, is_node_kind_migration
 CALL (n, node_diff_map, is_node_kind_migration) {
@@ -224,16 +231,28 @@ CALL (n, node_diff_map, is_node_kind_migration) {
     ELSE NULL
 END AS rel_peer_db_id
 // ------------------------------
+// find the correct relationship peer if the peer had its kind/inheritance migrated
+// and there are multiple Nodes with the same UUID
+// ------------------------------
+CALL (rel_peer_id, rel_peer_db_id) {
+    MATCH (rel_peer:Node {uuid: rel_peer_id})-[target_is_part_of:IS_PART_OF]->(:Root)
+    WHERE (rel_peer_db_id IS NULL OR %(id_func)s(rel_peer) = rel_peer_db_id)
+    AND target_is_part_of.branch IN [$source_branch, $target_branch]
+    RETURN rel_peer
+    ORDER BY target_is_part_of.branch_level DESC, target_is_part_of.from DESC, target_is_part_of.status ASC
+    LIMIT 1
+}
+WITH rel_name, related_rel_status, rel_peer
+// ------------------------------
 // determine the directions of each IS_RELATED
 // ------------------------------
-CALL (n, rel_name, …
+CALL (n, rel_name, rel_peer, related_rel_status) {
     MATCH (n)
     -[source_r_rel_1:IS_RELATED]
     -(r:Relationship {name: rel_name})
     -[source_r_rel_2:IS_RELATED]
-    -(rel_peer…
-    WHERE …
-    AND source_r_rel_1.branch IN [$source_branch, $target_branch]
+    -(rel_peer)
+    WHERE source_r_rel_1.branch IN [$source_branch, $target_branch]
     AND source_r_rel_2.branch IN [$source_branch, $target_branch]
     AND source_r_rel_1.from <= $at AND source_r_rel_1.to IS NULL
     AND source_r_rel_2.from <= $at AND source_r_rel_2.to IS NULL
@@ -251,37 +270,34 @@ CALL (n, node_diff_map, is_node_kind_migration) {
     source_r_rel_1.hierarchy AS r1_hierarchy,
     source_r_rel_2.hierarchy AS r2_hierarchy
 }
-WITH n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name, …
-CALL (n, rel_name, …
+WITH n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name, rel_peer, related_rel_status
+CALL (n, rel_name, rel_peer, related_rel_status) {
     OPTIONAL MATCH (n)
     -[target_r_rel_1:IS_RELATED {branch: $target_branch, status: "active"}]
     -(:Relationship {name: rel_name})
     -[target_r_rel_2:IS_RELATED {branch: $target_branch, status: "active"}]
-    -(rel_peer…
+    -(rel_peer)
     WHERE related_rel_status = "deleted"
-    AND (rel_peer_db_id IS NULL OR %(id_func)s(rel_peer) = rel_peer_db_id)
     AND target_r_rel_1.from <= $at AND target_r_rel_1.to IS NULL
     AND target_r_rel_2.from <= $at AND target_r_rel_2.to IS NULL
     SET target_r_rel_1.to = $at
     SET target_r_rel_2.to = $at
 }
-WITH n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name, …
+WITH n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name, rel_peer, related_rel_status
 // ------------------------------
 // conditionally create new IS_RELATED relationships on target_branch, if necessary
 // ------------------------------
-CALL (n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name, …
-    MATCH (p:Node {uuid: rel_peer_id})
-    WHERE rel_peer_db_id IS NULL OR %(id_func)s(p) = rel_peer_db_id
+CALL (n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name, rel_peer, related_rel_status) {
     OPTIONAL MATCH (n)
     -[r_rel_1:IS_RELATED {branch: $target_branch, status: related_rel_status}]
     -(:Relationship {name: rel_name})
     -[r_rel_2:IS_RELATED {branch: $target_branch, status: related_rel_status}]
-    -(…
+    -(rel_peer)
     WHERE r_rel_1.from <= $at
     AND (r_rel_1.to >= $at OR r_rel_1.to IS NULL)
     AND r_rel_2.from <= $at
     AND (r_rel_2.to >= $at OR r_rel_2.to IS NULL)
-    WITH …
+    WITH rel_peer, r_rel_1, r_rel_2
     WHERE r_rel_1 IS NULL
     AND r_rel_2 IS NULL
 // ------------------------------
@@ -301,19 +317,19 @@ CALL (n, node_diff_map, is_node_kind_migration) {
     <-[:IS_RELATED {branch: $target_branch, branch_level: $branch_level, from: $at, status: related_rel_status, hierarchy: r1_hierarchy}]
     -(r)
 }
-CALL (r, …
-    WITH r, …
+CALL (r, rel_peer, r2_dir, r2_hierarchy, related_rel_status) {
+    WITH r, rel_peer, r2_dir, r2_hierarchy, related_rel_status
     WHERE r2_dir = "r"
     CREATE (r)
     -[:IS_RELATED {branch: $target_branch, branch_level: $branch_level, from: $at, status: related_rel_status, hierarchy: r2_hierarchy}]
-    ->(…
+    ->(rel_peer)
 }
-CALL (r, …
-    WITH r, …
+CALL (r, rel_peer, r2_dir, r2_hierarchy, related_rel_status) {
+    WITH r, rel_peer, r2_dir, r2_hierarchy, related_rel_status
     WHERE r2_dir = "l"
     CREATE (r)
     <-[:IS_RELATED {branch: $target_branch, branch_level: $branch_level, from: $at, status: related_rel_status, hierarchy: r2_hierarchy}]
-    -(…
+    -(rel_peer)
 }
 }
 }
infrahub/core/migrations/graph/__init__.py
CHANGED
@@ -42,6 +42,7 @@ from .m037_index_attr_vals import Migration037
 from .m038_redo_0000_prefix_fix import Migration038
 from .m039_ipam_reconcile import Migration039
 from .m040_duplicated_attributes import Migration040
+from .m041_deleted_dup_edges import Migration041

 if TYPE_CHECKING:
     from infrahub.core.root import Root
@@ -89,6 +90,7 @@ MIGRATIONS: list[type[GraphMigration | InternalSchemaMigration | ArbitraryMigrat
     Migration038,
     Migration039,
     Migration040,
+    Migration041,
 ]


infrahub/core/migrations/graph/m041_deleted_dup_edges.py
ADDED
@@ -0,0 +1,149 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any
+
+from rich import print as rprint
+
+from infrahub.core.branch import Branch
+from infrahub.core.diff.repository.repository import DiffRepository
+from infrahub.core.initialization import get_root_node
+from infrahub.core.migrations.shared import MigrationResult
+from infrahub.core.query import Query, QueryType
+from infrahub.dependencies.registry import build_component_registry, get_component_registry
+from infrahub.log import get_logger
+
+from ..shared import ArbitraryMigration
+
+if TYPE_CHECKING:
+    from infrahub.database import InfrahubDatabase
+
+log = get_logger()
+
+
+class DeletePosthumousEdges(Query):
+    name = "delete_posthumous_edges_query"
+    type = QueryType.WRITE
+    insert_return = False
+
+    async def query_init(self, db: InfrahubDatabase, **kwargs: Any) -> None:  # noqa: ARG002
+        query = """
+// ------------
+// find deleted nodes
+// ------------
+MATCH (n:Node)-[e:IS_PART_OF]->(:Root)
+WHERE e.status = "deleted" OR e.to IS NOT NULL
+WITH DISTINCT n, e.branch AS delete_branch, e.branch_level AS delete_branch_level, CASE
+    WHEN e.status = "deleted" THEN e.from
+    ELSE e.to
+END AS delete_time
+// ------------
+// find the edges added to the deleted node after the delete time
+// ------------
+MATCH (n)-[added_e]-(peer)
+WHERE added_e.from > delete_time
+AND type(added_e) <> "IS_PART_OF"
+// if the node was deleted on a branch (delete_branch_level > 1), and then updated on main/global (added_e.branch_level = 1), we can ignore it
+AND added_e.branch_level >= delete_branch_level
+AND (added_e.branch = delete_branch OR delete_branch_level = 1)
+WITH DISTINCT n, delete_branch, delete_time, added_e, peer
+// ------------
+// get the branched_from for the branch on which the node was deleted
+// ------------
+CALL (added_e) {
+    MATCH (b:Branch {name: added_e.branch})
+    RETURN b.branched_from AS added_e_branched_from
+}
+// ------------
+// account for the following situations, given that the edge update time is after the node delete time
+// - deleted on main/global, updated on branch
+//   - illegal if the delete is before branch.branched_from
+// - deleted on branch, updated on branch
+//   - illegal
+// ------------
+WITH n, delete_branch, delete_time, added_e, peer
+WHERE delete_branch = added_e.branch
+OR delete_time < added_e_branched_from
+DELETE added_e
+"""
+        self.add_to_query(query)
+
+
+class DeleteDuplicateEdgesForMigratedKindNodes(Query):
+    name = "delete_duplicate_edges_for_migrated_kind_nodes_query"
+    type = QueryType.WRITE
+    insert_return = False
+
+    async def query_init(self, db: InfrahubDatabase, **kwargs: Any) -> None:  # noqa: ARG002
+        query = """
+// ------------
+// get UUIDs for migrated kind/inheritance nodes
+// ------------
+MATCH (n:Node)
+WITH n.uuid AS node_uuid, count(*) AS num_nodes_with_uuid
+WHERE num_nodes_with_uuid > 1
+CALL (node_uuid) {
+    // ------------
+    // find any Relationships for these nodes
+    // ------------
+    MATCH (n:Node {uuid: node_uuid})-[:IS_RELATED]-(rel:Relationship)
+    WITH DISTINCT rel
+    MATCH (rel)-[e]->(peer)
+    WITH
+        type(e) AS e_type,
+        e.branch AS e_branch,
+        e.from AS e_from,
+        e.to AS e_to,
+        e.status AS e_status,
+        e.peer AS e_peer,
+        CASE
+            WHEN startNode(e) = rel THEN "out" ELSE "in"
+        END AS direction,
+        collect(e) AS duplicate_edges
+    WHERE size(duplicate_edges) > 1
+    WITH tail(duplicate_edges) AS duplicate_edges_to_delete
+    UNWIND duplicate_edges_to_delete AS edge_to_delete
+    DELETE edge_to_delete
+} IN TRANSACTIONS OF 500 ROWS
+"""
+        self.add_to_query(query)
+
+
+class Migration041(ArbitraryMigration):
+    """Clean up improper merges that duplicated edges to nodes with migrated kinds
+
+    - delete all existing diffs b/c they could contain incorrect nodes linking to deleted nodes with migrated kind/inheritance
+    - delete all edges added to any nodes AFTER they were deleted on main
+    - delete any duplicate edges touching migrated kind/inheritance nodes on main
+    """
+
+    name: str = "041_deleted_dup_edges"
+    minimum_version: int = 40
+
+    async def validate_migration(self, db: InfrahubDatabase) -> MigrationResult:  # noqa: ARG002
+        result = MigrationResult()
+
+        return result
+
+    async def execute(self, db: InfrahubDatabase) -> MigrationResult:
+        root_node = await get_root_node(db=db)
+        default_branch_name = root_node.default_branch
+        default_branch = await Branch.get_by_name(db=db, name=default_branch_name)
+
+        rprint("Deleting all diffs", end="...")
+        build_component_registry()
+        component_registry = get_component_registry()
+        diff_repo = await component_registry.get_component(DiffRepository, db=db, branch=default_branch)
+        await diff_repo.delete_all_diff_roots()
+        rprint("done")
+
+        rprint("Deleting edges merged after node deleted", end="...")
+        delete_posthumous_edges_query = await DeletePosthumousEdges.init(db=db)
+        await delete_posthumous_edges_query.execute(db=db)
+        rprint("done")
+
+        rprint("Deleting duplicate edges for migrated kind/inheritance nodes", end="...")
+        delete_duplicate_edges_query = await DeleteDuplicateEdgesForMigratedKindNodes.init(db=db)
+        await delete_duplicate_edges_query.execute(db=db)
+        rprint("done")
+
+        return MigrationResult()
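The duplicate-edge query relies on collect(e) grouping edges by every identifying property and tail() keeping all but the first of each group. A rough Python analogy of that keep-first deduplication, over hypothetical edge records:

    from itertools import groupby

    edges = [  # stand-ins for graph edges; `id` marks which physical edge is which
        {"id": 1, "type": "HAS_VALUE", "branch": "main", "from": "t1", "status": "active"},
        {"id": 2, "type": "HAS_VALUE", "branch": "main", "from": "t1", "status": "active"},
        {"id": 3, "type": "HAS_VALUE", "branch": "main", "from": "t2", "status": "active"},
    ]

    def identity(e):  # the properties the Cypher WITH clause groups on
        return (e["type"], e["branch"], e["from"], e["status"])

    to_delete = []
    for _, group in groupby(sorted(edges, key=identity), key=identity):
        to_delete.extend(list(group)[1:])  # tail(): keep the first, drop duplicates

    print([e["id"] for e in to_delete])  # [2]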
infrahub/core/query/diff.py
CHANGED
@@ -301,13 +301,15 @@ WITH p, q, diff_rel, CASE
     ELSE $from_time
 END AS row_from_time
 ORDER BY %(id_func)s(p) DESC
-SKIP $offset
-LIMIT $limit
+SKIP toInteger($offset)
+LIMIT toInteger($limit)
 // -------------------------------------
 // Add flag to indicate if there is more data after this
 // -------------------------------------
 WITH collect([p, q, diff_rel, row_from_time]) AS limited_results
-…
+// extra NULL row ensures that has_more_data is always returned, even if all results are filtered out below
+WITH limited_results + [[NULL, NULL, NULL, NULL]] AS limited_results
+WITH limited_results, size(limited_results) = ($limit + 1) AS has_more_data
 UNWIND limited_results AS one_result
 WITH one_result[0] AS p, one_result[1] AS q, one_result[2] AS diff_rel, one_result[3] AS row_from_time, has_more_data
 // -------------------------------------
@@ -470,14 +472,16 @@ AND (
 // Limit the number of paths
 // -------------------------------------
 WITH root, r_root, p, diff_rel, q
-ORDER BY r_root.from, p.uuid, q.uuid, diff_rel.branch, diff_rel.from
-SKIP $offset
-LIMIT $limit
+ORDER BY r_root.from, p.uuid, q.uuid, q.name, diff_rel.branch, diff_rel.from
+SKIP toInteger($offset)
+LIMIT toInteger($limit)
 // -------------------------------------
 // Add flag to indicate if there is more data after this
 // -------------------------------------
 WITH collect([root, r_root, p, diff_rel, q]) AS limited_results
-…
+// extra NULL row ensures that has_more_data is always returned, even if all results are filtered out below
+WITH limited_results + [[NULL, NULL, NULL, NULL, NULL]] AS limited_results
+WITH limited_results, size(limited_results) = ($limit + 1) AS has_more_data
 UNWIND limited_results AS one_result
 WITH one_result[0] AS root, one_result[1] AS r_root, one_result[2] AS p, one_result[3] AS diff_rel, one_result[4] AS q, has_more_data
 // -------------------------------------
@@ -641,8 +645,28 @@
 )
 // skip paths where nodes/attrs/rels are updated after $from_time, those are handled in other queries
 AND (
-…
+    (
+        r_root.branch = diff_rel.branch
+        AND r_root.from <= $from_time
+        AND (r_root.to IS NULL OR r_root.to >= $to_time)
+    )
+    OR (
+        r_root.branch <> diff_rel.branch
+        AND r_root.from <= $from_time
+        AND (r_root.to IS NULL OR r_root.to >= $branch_from_time)
+    )
+)
+AND (
+    (
+        r_node.branch = diff_rel.branch
+        AND r_node.from <= $from_time
+        AND (r_node.to IS NULL OR r_node.to >= $to_time)
+    )
+    OR (
+        r_node.branch <> diff_rel.branch
+        AND r_node.from <= $from_time
+        AND (r_node.to IS NULL OR r_node.to >= $branch_from_time)
+    )
 )
 )
 // time-based filters for new nodes
@@ -658,8 +682,27 @@
 )
 // skip paths where nodes/attrs/rels are updated after $branch_from_time, those are handled in other queries
 AND (
-…
+    (
+        r_root.branch = diff_rel.branch
+        AND (r_root.to IS NULL OR r_root.to >= $to_time)
+    )
+    OR (
+        r_root.branch <> diff_rel.branch
+        AND r_root.from <= $branch_from_time
+        AND (r_root.to IS NULL OR r_root.to >= $branch_from_time)
+    )
+)
+AND (
+    (
+        r_node.branch = diff_rel.branch
+        AND r_node.from <= $branch_from_time
+        AND (r_node.to IS NULL OR r_node.to >= $to_time)
+    )
+    OR (
+        r_node.branch <> diff_rel.branch
+        AND r_node.from <= $branch_from_time
+        AND (r_node.to IS NULL OR r_node.to >= $branch_from_time)
+    )
 )
 )
 )
@@ -701,13 +744,15 @@ AND [%(id_func)s(n), type(r_node)] <> [%(id_func)s(q), type(diff_rel)]
 // -------------------------------------
 WITH diff_rel_path, r_root, n, r_node, p, diff_rel
 ORDER BY r_root.from, n.uuid, p.uuid, type(diff_rel), diff_rel.branch, diff_rel.from
-SKIP $offset
-LIMIT $limit
+SKIP toInteger($offset)
+LIMIT toInteger($limit)
 // -------------------------------------
 // Add flag to indicate if there is more data after this
 // -------------------------------------
 WITH collect([diff_rel_path, r_root, n, r_node, p, diff_rel]) AS limited_results
-…
+// extra NULL row ensures that has_more_data is always returned, even if all results are filtered out below
+WITH limited_results + [[NULL, NULL, NULL, NULL, NULL, NULL]] AS limited_results
+WITH limited_results, size(limited_results) = ($limit + 1) AS has_more_data
 UNWIND limited_results AS one_result
 WITH one_result[0] AS diff_rel_path, one_result[1] AS r_root, one_result[2] AS n,
     one_result[3] AS r_node, one_result[4] AS p, one_result[5] AS diff_rel, has_more_data
@@ -803,8 +848,8 @@ WHERE num_nodes_with_uuid > 1
 // -------------------------------------
 WITH node_uuid
 ORDER BY node_uuid
-SKIP $offset
-LIMIT $limit
+SKIP toInteger($offset)
+LIMIT toInteger($limit)
 WITH collect(node_uuid) AS node_uuids
 WITH node_uuids, size(node_uuids) = $limit AS has_more_data
 MATCH (:Root)<-[diff_rel:IS_PART_OF {branch: $branch_name}]-(n:Node)
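These hunks share one pagination trick: after LIMIT, the page is collected and padded with a single all-NULL row, so size(limited_results) = $limit + 1 exactly when the page came back full, and the has_more_data flag survives the UNWIND even if every real row is filtered out afterwards. The same full-page heuristic in plain Python:

    def fetch_page(rows, offset, limit):
        window = rows[offset : offset + limit]    # what the query's SKIP/LIMIT returns
        padded = window + [None]                  # the extra NULL sentinel row
        has_more_data = len(padded) == limit + 1  # True exactly when the page is full
        return window, has_more_data

    page, more = fetch_page(list(range(7)), offset=0, limit=4)
    assert page == [0, 1, 2, 3] and more is True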
infrahub/core/query/node.py
CHANGED
@@ -627,45 +627,46 @@ class NodeListGetAttributeQuery(Query):
         self.add_to_query(query)

         query = """
-…
-}
-WITH n, r1, a1 as a, r2, av1 as av
-WHERE r2.status = "active"
-WITH n, a, av, r1, r2
+CALL (n, a) {
+    MATCH (n)-[r:HAS_ATTRIBUTE]->(a:Attribute)
+    WHERE %(branch_filter)s
+    RETURN r.status = "active" AS is_active
+    ORDER BY r.branch_level DESC, r.from DESC, r.status ASC
+    LIMIT 1
+}
+WITH n, a
+WHERE is_active = TRUE
+CALL (a) {
+    MATCH (a)-[r:HAS_VALUE]->(av:AttributeValue)
+    WHERE %(branch_filter)s
+    RETURN av, r AS r2
+    ORDER BY r.branch_level DESC, r.from DESC, r.status ASC
+    LIMIT 1
+}
+WITH n, a, av, r2
+WHERE r2.status = "active"
         """ % {"branch_filter": branch_filter}
         self.add_to_query(query)

-        self.return_labels = ["n", "a", "av", "…
+        self.return_labels = ["n", "a", "av", "r2"]

         # Add Is_Protected and Is_visible
-        rel_isv_branch_filter, _ = self.branch.get_query_filter_path(
-            at=self.at, branch_agnostic=self.branch_agnostic, variable_name="rel_isv"
-        )
-        rel_isp_branch_filter, _ = self.branch.get_query_filter_path(
-            at=self.at, branch_agnostic=self.branch_agnostic, variable_name="rel_isp"
-        )
         query = """
-…
+CALL (a) {
+    MATCH (a)-[r:IS_VISIBLE]-(isv:Boolean)
+    WHERE (%(branch_filter)s)
+    RETURN r AS rel_isv, isv
+    ORDER BY rel_isv.branch_level DESC, rel_isv.from DESC, rel_isv.status ASC
+    LIMIT 1
+}
+CALL (a) {
+    MATCH (a)-[r:IS_PROTECTED]-(isp:Boolean)
+    WHERE (%(branch_filter)s)
+    RETURN r AS rel_isp, isp
+    ORDER BY rel_isp.branch_level DESC, rel_isp.from DESC, rel_isp.status ASC
+    LIMIT 1
+}
+        """ % {"branch_filter": branch_filter}
         self.add_to_query(query)

         self.return_labels.extend(["isv", "isp", "rel_isv", "rel_isp"])
@@ -878,6 +879,7 @@ class NodeListGetRelationshipsQuery(Query):
             RETURN DISTINCT n_uuid, rel_name, peer_uuid, direction
         """ % {"filters": rels_filter}
         self.add_to_query(query)
+        self.order_by = ["n_uuid", "rel_name", "peer_uuid", "direction"]
         self.return_labels = ["n_uuid", "rel_name", "peer_uuid", "direction"]

     def get_peers_group_by_node(self) -> GroupedPeerNodes:
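Both rewritten subqueries resolve the current state of an edge the same way: order the candidates so the most specific branch wins (branch_level DESC), the newest change wins within a branch (from DESC), and active sorts before deleted (status ASC), then take the first row. The equivalent selection in Python, as a stable two-pass sort over hypothetical edge dicts:

    edges = [
        {"branch_level": 1, "from": "2024-01-01", "status": "active"},   # main
        {"branch_level": 2, "from": "2024-02-01", "status": "deleted"},  # branch, newer
    ]

    ordered = sorted(edges, key=lambda e: e["status"])  # status ASC (tiebreak first)
    ordered = sorted(ordered, key=lambda e: (e["branch_level"], e["from"]), reverse=True)
    current = ordered[0]  # the branch-level-2 edge wins: the attribute is deleted there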
infrahub/git/base.py
CHANGED
@@ -932,7 +932,10 @@ class InfrahubRepositoryBase(BaseModel, ABC):
     def _raise_enriched_error_static(
         error: GitCommandError, name: str, location: str, branch_name: str | None = None
     ) -> NoReturn:
-        if …
+        if any(
+            err in error.stderr
+            for err in ("Repository not found", "does not appear to be a git", "Failed to connect to")
+        ):
             raise RepositoryConnectionError(identifier=name) from error

         if "error: pathspec" in error.stderr:
infrahub/git/tasks.py
CHANGED
@@ -1,3 +1,5 @@
+from typing import Any
+
 from infrahub_sdk import InfrahubClient
 from infrahub_sdk.protocols import (
     CoreArtifact,
@@ -14,7 +16,12 @@ from prefect.logging import get_run_logger

 from infrahub import lock
 from infrahub.context import InfrahubContext
-from infrahub.core.constants import …
+from infrahub.core.constants import (
+    InfrahubKind,
+    RepositoryInternalStatus,
+    RepositoryOperationalStatus,
+    ValidatorConclusion,
+)
 from infrahub.core.manager import NodeManager
 from infrahub.core.registry import registry
 from infrahub.exceptions import CheckError, RepositoryError
@@ -152,6 +159,39 @@ async def create_branch(branch: str, branch_id: str) -> None:
     pass


+@flow(name="sync-git-repo-with-origin", flow_run_name="Sync git repo with origin")
+async def sync_git_repo_with_origin_and_tag_on_failure(
+    client: InfrahubClient,
+    repository_id: str,
+    repository_name: str,
+    repository_location: str,
+    internal_status: str,
+    default_branch_name: str,
+    operational_status: str,
+    staging_branch: str | None = None,
+    infrahub_branch: str | None = None,
+) -> None:
+    repo = await InfrahubRepository.init(
+        id=repository_id,
+        name=repository_name,
+        location=repository_location,
+        client=client,
+        internal_status=internal_status,
+        default_branch_name=default_branch_name,
+    )
+
+    try:
+        await repo.sync(staging_branch=staging_branch)
+    except RepositoryError:
+        if operational_status == RepositoryOperationalStatus.ONLINE.value:
+            params: dict[str, Any] = {
+                "branches": [infrahub_branch] if infrahub_branch else [],
+                "nodes": [str(repository_id)],
+            }
+            await add_tags(**params)
+        raise
+
+
 @flow(name="git_repositories_sync", flow_run_name="Sync Git Repositories")
 async def sync_remote_repositories() -> None:
     log = get_run_logger()
@@ -204,7 +244,17 @@ async def sync_remote_repositories() -> None:
             continue

         try:
-            await …
+            await sync_git_repo_with_origin_and_tag_on_failure(
+                client=client,
+                repository_id=repository_data.repository.id,
+                repository_name=repository_data.repository.name.value,
+                repository_location=repository_data.repository.location.value,
+                internal_status=active_internal_status,
+                default_branch_name=repository_data.repository.default_branch.value,
+                operational_status=repository_data.repository.operational_status.value,
+                staging_branch=staging_branch,
+                infrahub_branch=infrahub_branch,
+            )
             # Tell workers to fetch to stay in sync
             message = messages.RefreshGitFetch(
                 meta=Meta(initiator_id=WORKER_IDENTITY, request_id=get_log_data().get("request_id", "")),
infrahub/graphql/analyzer.py
CHANGED
@@ -312,6 +312,13 @@ class GraphQLQueryReport:
         return []

     def required_argument(self, argument: GraphQLArgument) -> bool:
+        if argument.name == "ids" and argument.kind == "list_value":
+            for variable in self.variables:
+                if f"['${variable.name}']" == argument.as_variable_name and variable.required:
+                    return True
+
+            return False
+
         if not argument.is_variable:
             # If the argument isn't a variable it would have been
             # statically defined in the input and as such required
@@ -364,6 +371,8 @@ class GraphQLQueryReport:
             if [[argument.name]] == query.infrahub_model.uniqueness_constraints:
                 if self.required_argument(argument=argument):
                     targets_single_query = True
+            elif argument.name == "ids" and self.required_argument(argument=argument):
+                targets_single_query = True

             if not targets_single_query:
                 return False
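The effect is that a query whose ids argument is bound to a required list variable now counts as targeting a fixed set of nodes. A hypothetical query shape the new branch would accept (the operation and type names are illustrative):

    # Because `ids` is bound to a required ($ids: [ID!]!) list variable,
    # required_argument() returns True and the query targets unique nodes.
    QUERY = """
    query Artifacts($ids: [ID!]!) {
      CoreArtifact(ids: $ids) {
        edges { node { id } }
      }
    }
    """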
infrahub/message_bus/types.py
CHANGED
@@ -92,6 +92,7 @@ class ProposedChangeArtifactDefinition(BaseModel):
     query_name: str  # Deprecated
     query_id: str
     query_models: list[str]
+    query_payload: str = Field(..., description="GraphQL query")
     repository_id: str
     transform_kind: str
     template_path: str = Field(default="")
infrahub/proposed_change/tasks.py
CHANGED
@@ -11,6 +11,7 @@ import pytest
 from infrahub_sdk.exceptions import ModuleImportError
 from infrahub_sdk.node import InfrahubNode
 from infrahub_sdk.protocols import (
+    CoreArtifactDefinition,
     CoreArtifactValidator,
     CoreGeneratorDefinition,
     CoreGeneratorValidator,
@@ -44,7 +45,7 @@ from infrahub.core.diff.model.diff import DiffElementType, SchemaConflict
 from infrahub.core.diff.model.path import NodeDiffFieldSummary
 from infrahub.core.integrity.object_conflict.conflict_recorder import ObjectConflictValidatorRecorder
 from infrahub.core.manager import NodeManager
-from infrahub.core.protocols import …
+from infrahub.core.protocols import CoreDataCheck, CoreValidator
 from infrahub.core.protocols import CoreProposedChange as InternalCoreProposedChange
 from infrahub.core.timestamp import Timestamp
 from infrahub.core.validators.checks_runner import run_checks_and_update_validator
@@ -59,6 +60,8 @@ from infrahub.git.base import extract_repo_file_information
 from infrahub.git.models import TriggerRepositoryInternalChecks, TriggerRepositoryUserChecks
 from infrahub.git.repository import InfrahubRepository, get_initialized_repo
 from infrahub.git.utils import fetch_artifact_definition_targets, fetch_proposed_change_generator_definition_targets
+from infrahub.graphql.analyzer import InfrahubGraphQLQueryAnalyzer
+from infrahub.graphql.initialization import prepare_graphql_params
 from infrahub.log import get_logger
 from infrahub.message_bus.types import (
     ProposedChangeArtifactDefinition,
@@ -664,6 +667,27 @@ async def validate_artifacts_generation(model: RequestArtifactDefinitionCheck, c
     repository = model.branch_diff.get_repository(repository_id=model.artifact_definition.repository_id)
     impacted_artifacts = model.branch_diff.get_subscribers_ids(kind=InfrahubKind.ARTIFACT)

+    source_schema_branch = registry.schema.get_schema_branch(name=model.source_branch)
+    source_branch = registry.get_branch_from_registry(branch=model.source_branch)
+
+    graphql_params = await prepare_graphql_params(db=await get_database(), branch=model.source_branch)
+    query_analyzer = InfrahubGraphQLQueryAnalyzer(
+        query=model.artifact_definition.query_payload,
+        branch=source_branch,
+        schema_branch=source_schema_branch,
+        schema=graphql_params.schema,
+    )
+
+    only_has_unique_targets = query_analyzer.query_report.only_has_unique_targets
+    if not only_has_unique_targets:
+        log.warning(
+            f"Artifact definition {artifact_definition.name.value} query does not guarantee unique targets. All targets will be processed."
+        )
+
+    managed_branch = model.source_branch_sync_with_git and model.branch_diff.has_file_modifications
+    if managed_branch:
+        log.info("Source branch is synced with Git repositories with updates, all artifacts will be processed")
+
     checks = []

     for relationship in group.members.peers:
@@ -671,8 +695,9 @@ async def validate_artifacts_generation(model: RequestArtifactDefinitionCheck, c
         artifact_id = artifacts_by_member.get(member.id)
         if _should_render_artifact(
             artifact_id=artifact_id,
-            managed_branch=…
+            managed_branch=managed_branch,
             impacted_artifacts=impacted_artifacts,
+            only_has_unique_targets=only_has_unique_targets,
         ):
             log.info(f"Trigger Artifact processing for {member.display_label}")

@@ -718,21 +743,26 @@ async def validate_artifacts_generation(model: RequestArtifactDefinitionCheck, c
 )


-def _should_render_artifact(…
+def _should_render_artifact(
+    artifact_id: str | None,
+    managed_branch: bool,
+    impacted_artifacts: list[str],
+    only_has_unique_targets: bool,
+) -> bool:
     """Returns a boolean to indicate if an artifact should be generated or not.
     Will return true if:
     * The artifact_id wasn't set which could be that it's a new object that doesn't have a previous artifact
-    * The source …
+    * The source branch is not data only which would indicate that it could contain updates in git to the transform
     * The artifact_id exists in the impacted_artifacts list
+    * The query failes the only_has_unique_targets check
     Will return false if:
     * The source branch is a data only branch and the artifact_id exists and is not in the impacted list
     """

-…
-    return True
+    if not only_has_unique_targets or not artifact_id or managed_branch:
+        return True
+
+    return artifact_id in impacted_artifacts


 @flow(
@@ -1249,6 +1279,9 @@ query GatherArtifactDefinitions {
                 name {
                     value
                 }
+                query {
+                    value
+                }
             }
         }
         ... on CoreTransformJinja2 {
@@ -1466,6 +1499,7 @@ def _parse_artifact_definitions(definitions: list[dict]) -> list[ProposedChangeA
             query_name=definition["node"]["transformation"]["node"]["query"]["node"]["name"]["value"],
             query_id=definition["node"]["transformation"]["node"]["query"]["node"]["id"],
             query_models=definition["node"]["transformation"]["node"]["query"]["node"]["models"]["value"] or [],
+            query_payload=definition["node"]["transformation"]["node"]["query"]["node"]["query"]["value"],
             repository_id=definition["node"]["transformation"]["node"]["repository"]["node"]["id"],
             transform_kind=definition["node"]["transformation"]["node"]["__typename"],
         )
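The net effect of the _should_render_artifact rewrite is a single guard: render unconditionally when the targets are not provably unique, when no previous artifact exists, or when the branch carries git file changes; otherwise render only artifacts in the impacted list. Two quick illustrations of the resulting behavior:

    from infrahub.proposed_change.tasks import _should_render_artifact

    # No previous artifact: always rendered.
    assert _should_render_artifact(
        artifact_id=None, managed_branch=False, impacted_artifacts=[], only_has_unique_targets=True
    )
    # Unique targets, artifact exists, no git changes: rendered only if impacted.
    assert not _should_render_artifact(
        artifact_id="abc", managed_branch=False, impacted_artifacts=["xyz"], only_has_unique_targets=True
    )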
infrahub/services/adapters/http/__init__.py
CHANGED
@@ -3,10 +3,15 @@ from __future__ import annotations
 from typing import TYPE_CHECKING, Any

 if TYPE_CHECKING:
+    import ssl
+
     import httpx


 class InfrahubHTTP:
+    def verify_tls(self, verify: bool | None = None) -> bool | ssl.SSLContext:
+        raise NotImplementedError()
+
     async def get(
         self,
         url: str,
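verify_tls is only declared here as a hook; the concrete behavior lives in HttpxAdapter (infrahub/services/adapters/http/httpx.py, unchanged in this release). Purely as an illustration of the expected contract, and not Infrahub's actual implementation, an override might look like this:

    import ssl

    class ExampleHTTPAdapter(InfrahubHTTP):  # illustrative subclass
        _ssl_context: ssl.SSLContext | None = None

        def verify_tls(self, verify: bool | None = None) -> bool | ssl.SSLContext:
            if verify is False:
                return False  # caller explicitly disabled certificate verification
            if self._ssl_context is None:
                # Cache the context: rebuilding the trust store per client is costly.
                self._ssl_context = ssl.create_default_context()
            return self._ssl_context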
infrahub/services/adapters/workflow/worker.py
CHANGED
@@ -3,8 +3,10 @@ from __future__ import annotations
 from typing import TYPE_CHECKING, Any, overload

 from prefect.client.schemas.objects import StateType
+from prefect.context import AsyncClientContext
 from prefect.deployments import run_deployment

+from infrahub.services.adapters.http.httpx import HttpxAdapter
 from infrahub.workers.utils import inject_context_parameter
 from infrahub.workflows.initialization import setup_task_manager
 from infrahub.workflows.models import WorkflowInfo
@@ -19,6 +21,11 @@ if TYPE_CHECKING:


 class WorkflowWorkerExecution(InfrahubWorkflow):
+    # This is required to grab a cached SSLContext from the HttpAdapter.
+    # We cannot use the get_http() dependency since it introduces a circular dependency.
+    # We could remove this later on by introducing a cached SSLContext outside of this adapter.
+    _http_adapter = HttpxAdapter()
+
     @staticmethod
     async def initialize(component_is_primary_server: bool) -> None:
         if component_is_primary_server:
@@ -79,5 +86,6 @@ class WorkflowWorkerExecution(InfrahubWorkflow):
         parameters = dict(parameters) if parameters is not None else {}
         inject_context_parameter(func=flow_func, parameters=parameters, context=context)

-        …
+        async with AsyncClientContext(httpx_settings={"verify": self._http_adapter.verify_tls()}):
+            flow_run = await run_deployment(name=workflow.full_name, timeout=0, parameters=parameters or {}, tags=tags)  # type: ignore[return-value, misc]
         return WorkflowInfo.from_flow(flow_run=flow_run)
infrahub/task_manager/task.py
CHANGED
@@ -1,7 +1,10 @@
+import asyncio
 import uuid
+from datetime import datetime, timedelta, timezone
 from typing import Any
 from uuid import UUID

+from prefect import State
 from prefect.client.orchestration import PrefectClient, get_client
 from prefect.client.schemas.filters import (
     ArtifactFilter,
@@ -12,6 +15,7 @@ from prefect.client.schemas.filters import (
     FlowRunFilter,
     FlowRunFilterId,
     FlowRunFilterName,
+    FlowRunFilterStartTime,
     FlowRunFilterState,
     FlowRunFilterStateType,
     FlowRunFilterTags,
@@ -311,3 +315,72 @@ class PrefectTask:
         )

         return {"count": count or 0, "edges": nodes}
+
+    @classmethod
+    async def delete_flow_runs(
+        cls,
+        states: list[StateType] = [StateType.COMPLETED, StateType.FAILED, StateType.CANCELLED],  # noqa: B006
+        delete: bool = True,
+        days_to_keep: int = 2,
+        batch_size: int = 100,
+    ) -> None:
+        """Delete flow runs in the specified states and older than specified days."""
+
+        logger = get_logger()
+
+        async with get_client(sync_client=False) as client:
+            cutoff = datetime.now(timezone.utc) - timedelta(days=days_to_keep)
+
+            flow_run_filter = FlowRunFilter(
+                start_time=FlowRunFilterStartTime(before_=cutoff),  # type: ignore[arg-type]
+                state=FlowRunFilterState(type=FlowRunFilterStateType(any_=states)),
+            )
+
+            # Get flow runs to delete
+            flow_runs = await client.read_flow_runs(flow_run_filter=flow_run_filter, limit=batch_size)
+
+            deleted_total = 0
+
+            while True:
+                batch_deleted = 0
+                failed_deletes = []
+
+                # Delete each flow run through the API
+                for flow_run in flow_runs:
+                    try:
+                        if delete:
+                            await client.delete_flow_run(flow_run_id=flow_run.id)
+                        else:
+                            await client.set_flow_run_state(
+                                flow_run_id=flow_run.id,
+                                state=State(type=StateType.CRASHED),
+                                force=True,
+                            )
+                        deleted_total += 1
+                        batch_deleted += 1
+                    except Exception as e:
+                        logger.warning(f"Failed to delete flow run {flow_run.id}: {e}")
+                        failed_deletes.append(flow_run.id)
+
+                    # Rate limiting
+                    if batch_deleted % 10 == 0:
+                        await asyncio.sleep(0.5)
+
+                logger.info(f"Delete {batch_deleted}/{len(flow_runs)} flow runs (total: {deleted_total})")
+
+                # Get next batch
+                previous_flow_run_ids = [fr.id for fr in flow_runs]
+                flow_runs = await client.read_flow_runs(flow_run_filter=flow_run_filter, limit=batch_size)
+
+                if not flow_runs:
+                    logger.info("No more flow runs to delete")
+                    break
+
+                if previous_flow_run_ids == [fr.id for fr in flow_runs]:
+                    logger.info("Found same flow runs to delete, aborting")
+                    break
+
+                # Delay between batches to avoid overwhelming the API
+                await asyncio.sleep(1.0)
+
+            logger.info(f"Retention complete. Total deleted tasks: {deleted_total}")
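Combined with the new CLI commands above, retention can also be driven directly from code once the Infrahub configuration (and with it the Prefect API settings) has been loaded, e.g. via config.load_and_exit():

    import asyncio

    from prefect.client.schemas.objects import StateType

    from infrahub.task_manager.task import PrefectTask

    # Purge completed/failed/cancelled runs older than 30 days, 100 per batch.
    asyncio.run(PrefectTask.delete_flow_runs(days_to_keep=30, batch_size=100))

    # Force long-RUNNING runs older than 2 days into CRASHED instead of deleting,
    # mirroring what the stale-runs command does.
    asyncio.run(
        PrefectTask.delete_flow_runs(states=[StateType.RUNNING], delete=False, days_to_keep=2)
    )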
infrahub/workers/infrahub_async.py
CHANGED
@@ -8,6 +8,7 @@ from infrahub_sdk import Config, InfrahubClient
 from infrahub_sdk.exceptions import Error as SdkError
 from prefect import settings as prefect_settings
 from prefect.client.schemas.objects import FlowRun
+from prefect.context import AsyncClientContext
 from prefect.flow_engine import run_flow_async
 from prefect.logging.handlers import APILogHandler
 from prefect.workers.base import BaseJobConfiguration, BaseVariables, BaseWorker, BaseWorkerResult
@@ -27,6 +28,7 @@ from infrahub.workers.dependencies import (
     get_cache,
     get_component,
     get_database,
+    get_http,
     get_message_bus,
     get_workflow,
     set_component_type,
@@ -154,7 +156,9 @@ class InfrahubWorkerAsync(BaseWorker):
         if task_status:
             task_status.started(True)

-        …
+        async with AsyncClientContext(httpx_settings={"verify": get_http().verify_tls()}) as ctx:
+            ctx._httpx_settings = None  # Hack to make all child task/flow runs use the same client
+            await run_flow_async(flow=flow_func, flow_run=flow_run, parameters=params, return_type="state")

         return InfrahubWorkerAsyncResult(status_code=0, identifier=str(flow_run.id))
infrahub/workflows/utils.py
CHANGED
@@ -9,6 +9,7 @@ from prefect.runtime import flow_run
 from infrahub.core.constants import GLOBAL_BRANCH_NAME
 from infrahub.core.registry import registry
 from infrahub.tasks.registry import refresh_branches
+from infrahub.workers.dependencies import get_http

 from .constants import TAG_NAMESPACE, WorkflowTag

@@ -26,7 +27,7 @@ async def add_tags(
     namespace: bool = True,
     db_change: bool = False,
 ) -> None:
-    client = get_client(sync_client=False)
+    client = get_client(httpx_settings={"verify": get_http().verify_tls()}, sync_client=False)
     current_flow_run_id = flow_run.id
     current_tags: list[str] = flow_run.tags
     branch_tags = (
{infrahub_server-1.4.12.dist-info → infrahub_server-1.4.13.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: infrahub-server
-Version: 1.4.12
+Version: 1.4.13
 Summary: Infrahub is taking a new approach to Infrastructure Management by providing a new generation of datastore to organize and control all the data that defines how an infrastructure should run.
 License: Apache-2.0
 Author: OpsMill
@@ -47,7 +47,7 @@ infrahub/cli/events.py,sha256=nJmowQgTxRs6qaT41A71Ei9jm6qtYaL2amAT5TA1H_k,1726
|
|
|
47
47
|
infrahub/cli/git_agent.py,sha256=ajT9-kdd3xLIysOPe8GqZyCDMkpNyhqfWjBg9HPWVcg,5240
|
|
48
48
|
infrahub/cli/patch.py,sha256=ztOkWyo0l_Wo0WX10bvSqGZibKzowrwx82oi69cjwkY,6018
|
|
49
49
|
infrahub/cli/server.py,sha256=zeKgJE9V0usSMVBwye0sRNNh6Ctj-nSZHqHbNskqyz4,2248
|
|
50
|
-
infrahub/cli/tasks.py,sha256=
|
|
50
|
+
infrahub/cli/tasks.py,sha256=6_2IRd0JCMJV0W9_bF6b-l67dlmBJ5izDtG4YkSaIW0,3364
|
|
51
51
|
infrahub/cli/upgrade.py,sha256=6NJ0y_CmVUVVo-ICER0rLFsMx_VcGUx_wH9OCErSJGA,4660
|
|
52
52
|
infrahub/components.py,sha256=lSLDCDwIZoakZ2iBrfHi9c3BxzugMiuiZO6V7Egt6tk,107
|
|
53
53
|
infrahub/computed_attribute/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
@@ -86,7 +86,7 @@ infrahub/core/diff/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuF
|
|
|
86
86
|
infrahub/core/diff/artifacts/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
87
87
|
infrahub/core/diff/artifacts/calculator.py,sha256=qk1DspB3bkKeWJFesLbmziCALVnbRadjrez1kn_IZWU,4435
|
|
88
88
|
infrahub/core/diff/branch_differ.py,sha256=62TRs3tGb4brQqCaVoI2iMIiPnny3_0_e9j-Mq-AXx4,7752
|
|
89
|
-
infrahub/core/diff/calculator.py,sha256=
|
|
89
|
+
infrahub/core/diff/calculator.py,sha256=KQIxvrzNVDLKBhPux3-3boyEsXgSvJm8kzhJ0R88E_c,10104
|
|
90
90
|
infrahub/core/diff/combiner.py,sha256=qL4WQsphB2sVnncgskSG_QcJBqBHjaK0vWU_apeTn-E,23508
|
|
91
91
|
infrahub/core/diff/conflict_transferer.py,sha256=LZCuS9Dbr4yBf-bd3RF-9cPnaOvVWiU3KBmmwxbRZl0,3968
|
|
92
92
|
infrahub/core/diff/conflicts_enricher.py,sha256=x6qiZOXO2A3BQ2Fm78apJ4WA7HLzPO84JomJfcyuyDg,12552
|
|
@@ -118,7 +118,7 @@ infrahub/core/diff/payload_builder.py,sha256=5R_QuPM5P_uQONmTDbtpIjhshs_OJCcXLnV
|
|
|
118
118
|
infrahub/core/diff/query/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
119
119
|
infrahub/core/diff/query/all_conflicts.py,sha256=gWLwkCR2AK0IJccnhcE8vkSHu5ugZfKTDhCoFi4yAJo,3058
|
|
120
120
|
infrahub/core/diff/query/artifact.py,sha256=jopnYwuYEVvknCXqqI3TQnMH69ABfH49p1Zu6hH9dYY,9098
|
|
121
|
-
infrahub/core/diff/query/delete_query.py,sha256=
|
|
121
|
+
infrahub/core/diff/query/delete_query.py,sha256=R8S-TzraWOi1Fz7FqBtmK8FJtEe4_9OgK32gUq2dlzc,1087
|
|
122
122
|
infrahub/core/diff/query/diff_get.py,sha256=SzlJAF5DNKcbavgVOxLKJ-o8EsuImTGE2uNPg9hcMq0,7438
|
|
123
123
|
infrahub/core/diff/query/diff_summary.py,sha256=sypXfK4EO_BZBuohlv419AjgL5ZeRwMiwnI7IIlh0KE,3841
|
|
124
124
|
infrahub/core/diff/query/drop_nodes.py,sha256=NP29dbW-z4s_rp_MtIwTl3FXElfCP6eqEpF_9r3Z3VA,1674
|
|
@@ -127,7 +127,7 @@ infrahub/core/diff/query/field_summary.py,sha256=-1p6xeyn0w6fQPrpOI6a9Id95wqtdxK
|
|
|
127
127
|
infrahub/core/diff/query/filters.py,sha256=McTtRNGg8fmnqTtNH-msfzH-8eKCBsM6-fitxTp5T8w,4324
|
|
128
128
|
infrahub/core/diff/query/get_conflict_query.py,sha256=kpGZA4QZrXxv_vnoAP5oa9-347VzsNWUIBWcg7rg03U,892
|
|
129
129
|
infrahub/core/diff/query/has_conflicts_query.py,sha256=kt0Z606vP2r1g7OqW2RrYj9LbiVkrzGfQ0AKCHx21XI,2547
|
|
130
|
-
infrahub/core/diff/query/merge.py,sha256=
|
|
130
|
+
infrahub/core/diff/query/merge.py,sha256=TvDldR3iRFr23-I3zJZRgcUhkfguBY2d785DQAHSOHg,32966
|
|
131
131
|
infrahub/core/diff/query/merge_tracking_id.py,sha256=VLGsKuOCIMYe0I-0r01YHF5iaLYIkfSCVQatHM-ybFA,833
|
|
132
132
|
infrahub/core/diff/query/roots_metadata.py,sha256=FT-48amqoR2RS4CkfnnXGI7Z5uOL4hm7IdZiz3SFHRo,2182
|
|
133
133
|
infrahub/core/diff/query/save.py,sha256=xBKWpWfRWfaP7g523xKMK82ogg0AfVQTTMeyz8oe-o0,22956
|
|
@@ -159,7 +159,7 @@ infrahub/core/ipam/utilization.py,sha256=OKFvcCoxFTkYnwibLhBM2Kbb2vsyI4eX07gtCf_
|
|
|
159
159
|
infrahub/core/manager.py,sha256=zlmxJnioJmZynjiRT3jFnBIWGe0Z38uwVm1ZLyxX_MU,47644
|
|
160
160
|
infrahub/core/merge.py,sha256=TNZpxjNYcl3dnvE8eYXaWSXFDYeEa8DDsS9XbR2XKlA,11217
|
|
161
161
|
infrahub/core/migrations/__init__.py,sha256=dIExw90CrdTByeJqpiWkaZBclpAfzatG2H6fXx54su0,1305
|
|
162
|
-
infrahub/core/migrations/graph/__init__.py,sha256=
|
|
162
|
+
infrahub/core/migrations/graph/__init__.py,sha256=Et5wn668HEwN_uLWlwtF0JkjJiZqR_mFbE_BaNdWcmo,4363
|
|
163
163
|
infrahub/core/migrations/graph/m001_add_version_to_graph.py,sha256=YcLN6cFjE6IGheXR4Ujb6CcyY8bJ7WE289hcKJaENOc,1515
|
|
164
164
|
infrahub/core/migrations/graph/m002_attribute_is_default.py,sha256=wB6f2N_ChTvGajqHD-OWCG5ahRMDhhXZuwo79ieq_II,1036
|
|
165
165
|
infrahub/core/migrations/graph/m003_relationship_parent_optional.py,sha256=Aya-s98XfE9C7YluOwEjilwgnjaBnZxp27w_Xdv_NmU,2330
|
|
@@ -200,6 +200,7 @@ infrahub/core/migrations/graph/m037_index_attr_vals.py,sha256=bJB4yPWE73XA_ErUcn
|
|
|
200
200
|
infrahub/core/migrations/graph/m038_redo_0000_prefix_fix.py,sha256=8seWnXQhgEJDFLWxYHVcnMNDPcHq5C24c0RYrtn_WGE,2411
|
|
201
201
|
infrahub/core/migrations/graph/m039_ipam_reconcile.py,sha256=gUf4Fo3CrzJ2hwbaKlQclripTDrI7cVk_GHsBlBNMKE,10916
|
|
202
202
|
infrahub/core/migrations/graph/m040_duplicated_attributes.py,sha256=2LxsG-CfcZnBirwGhwYL4kU-g3oxl6lNSM12vZTZ7Gw,2930
|
|
203
|
+
infrahub/core/migrations/graph/m041_deleted_dup_edges.py,sha256=eP2BqUfvwkjACJrKI5fVyBBmXxEDwxtAD9O_CcbwBMw,5409
|
|
203
204
|
infrahub/core/migrations/query/__init__.py,sha256=JoWOUWlV6IzwxWxObsfCnAAKUOHJkE7dZlOsfB64ZEo,876
|
|
204
205
|
infrahub/core/migrations/query/attribute_add.py,sha256=oitzB-PPAclfyNtcwCWJY3RdI5Zi4oEnR62BDzn1UQk,4835
|
|
205
206
|
infrahub/core/migrations/query/attribute_rename.py,sha256=onb9Nanht1Tz47JgneAcFsuhqqvPS6dvI2nNjRupLLo,6892
|
|
@@ -243,9 +244,9 @@ infrahub/core/query/__init__.py,sha256=2qIMaODLwJ6pK6BUd5vODTlA15Aecf5I8_-J44UlC
|
|
|
243
244
|
infrahub/core/query/attribute.py,sha256=xojZIHX-XfXlN_jgM1TQ1Bp4dXr4oLEWlr2A7igTvIg,12658
|
|
244
245
|
infrahub/core/query/branch.py,sha256=aIYyDxpnw_Zw2lqTnMEVlhPUaYckZtJJJU1SFUht1o0,4343
|
|
245
246
|
infrahub/core/query/delete.py,sha256=7tPP1qtNV6QGYtmgE1RKsuQ9oxENnMTVkttLvJ2PiKg,1927
|
|
246
|
-
infrahub/core/query/diff.py,sha256=
|
|
247
|
+
infrahub/core/query/diff.py,sha256=uvojpzJSZFdcuutPAbA74M6R85hN1fteBDS4ZufE8IA,38579
|
|
247
248
|
infrahub/core/query/ipam.py,sha256=dOs_LZr-DONrCPw6t5Ug9mBPn8a-S2NKja3Vr-zIeaM,34523
|
|
248
|
-
infrahub/core/query/node.py,sha256=
|
|
249
|
+
infrahub/core/query/node.py,sha256=7pTAXxHHMR_BZjKACShMHhQrErB_NNqxH4-YCncMe6E,70661
|
|
249
250
|
infrahub/core/query/relationship.py,sha256=GpaEcf8YRiVpqTxrp10NFOUCHeyE7SqhOFyf3F44eNo,48474
|
|
250
251
|
infrahub/core/query/resource_manager.py,sha256=uSvs1WZmdbyt_PjaUi9lXnYdPt-lhJV1RjYoUHYjQdk,16620
|
|
251
252
|
infrahub/core/query/standard_node.py,sha256=mPBXyqk4RzoWRUX4NoojoVi8zk-sJ03GmzmUaWqOgSI,4825
|
|
@@ -439,20 +440,20 @@ infrahub/generators/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSu
 infrahub/generators/models.py,sha256=9qhSfsoG-uYux35HClAxSq7TRfkosqN3i_eQkeTokLs,1916
 infrahub/generators/tasks.py,sha256=wxlRsGHUwuYxbpuz6ReQ40DPmRTGwndorBdcpzpvp_8,9552
 infrahub/git/__init__.py,sha256=KeQ9U8UI5jDj6KB6j00Oal7MZmtOD9vKqVgiezG_EQA,281
-infrahub/git/base.py,sha256=
+infrahub/git/base.py,sha256=1b6-fKCIzDCu49cS3LqNeqmm0iqKOsUNhimVmPIzJ98,38778
 infrahub/git/constants.py,sha256=XpzcAkXbsgXZgrXey74id1sXV8Q6EHb_4FNw7BndxyY,106
 infrahub/git/directory.py,sha256=fozxLXXJPweHG95yQwQkR5yy3sfTdmHiczCAJnsUX54,861
 infrahub/git/integrator.py,sha256=4vlTqped0IFU3elgmwZKRvDHaSu2qoBNWikrIb0S2no,62892
 infrahub/git/models.py,sha256=i-6KsAm98B__bP23VVykhuMqadqCLfT0sJAIamRzLII,12358
 infrahub/git/repository.py,sha256=Z8I-DMkT6hfBkm3bQwQgkbilTGEgFl14sV8sr3g73mA,11584
-infrahub/git/tasks.py,sha256=
+infrahub/git/tasks.py,sha256=cQ7nbcqEKq67qmmFyYFR4HfPyjs6sCl4zh2Hzce_N5Q,39610
 infrahub/git/utils.py,sha256=1VCvxpXIpDWlM15Ix8IJEsMXNWMRG9gKLjaHb3RSTqg,5345
 infrahub/git/worktree.py,sha256=8IYJWOBytKUWwhMmMVehR4ceeO9e13nV-mvn3iVEgZY,1727
 infrahub/git_credential/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 infrahub/git_credential/askpass.py,sha256=BL7e4Xkx5la7XFk-GQR6MXxV5B29Mzb5ZnVnljd7Xpw,1513
 infrahub/git_credential/helper.py,sha256=cwSMKRTgqrqIBM66jEOtlj4MMLf647KJWmtnnVxFtTY,2337
 infrahub/graphql/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-infrahub/graphql/analyzer.py,sha256=
+infrahub/graphql/analyzer.py,sha256=PwVB7jSIvzRA1SJmRopUZ7yRRGqDFnD1toB-K3WmWgQ,30947
 infrahub/graphql/api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 infrahub/graphql/api/dependencies.py,sha256=-NMUA_N4tWcVpS6ksCebAyza-JTmHqyYY_QZizgBR1c,1690
 infrahub/graphql/api/endpoints.py,sha256=wH9eO3CFT-eoSe1Y32BhU9mIf6smEnPeP3tAxZkdt4g,1510
@@ -579,7 +580,7 @@ infrahub/message_bus/operations/refresh/__init__.py,sha256=vBuvTL4zRRpOMXATmckQ3
 infrahub/message_bus/operations/refresh/registry.py,sha256=ny-8_Gsd46CgdDm_ZXBwMpYdxNMU3t7CyglTWH3Q6Ww,1277
 infrahub/message_bus/operations/send/__init__.py,sha256=ivuUTAknLiWfArR44SxA40l0UKVkdHjtDIx0mg06IcE,39
 infrahub/message_bus/operations/send/echo.py,sha256=656IFCpl2EA6EQjA2iwdJtYyo4yKb6iiv4r3oqQEd6o,712
-infrahub/message_bus/types.py,sha256=
+infrahub/message_bus/types.py,sha256=awEghaKn_UQFsx_7t1m_Gqh97VL5NSpZHHgU5ONrRBM,4535
 infrahub/middleware.py,sha256=Su129MXkXazE9ODlIZ_KtuRHOakMsOHbVKIx15NKXpU,1547
 infrahub/models.py,sha256=QmwJwo3hNCta8BXM7eLsD9qv1S73Rj0cC_crLpadHTc,715
 infrahub/patch/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -625,7 +626,7 @@ infrahub/proposed_change/branch_diff.py,sha256=IdMxf5zPmhybQKPPz7AlruNmLCKf5VISP
 infrahub/proposed_change/checker.py,sha256=ZhNEVJKsQbHH2UE1O35MfOVa8cK1QGEqGyn6MsOuqSQ,1558
 infrahub/proposed_change/constants.py,sha256=auifG94Oo2cJ4RwZx4P-XDPDpKYPtEVxh013KPfiEdU,2080
 infrahub/proposed_change/models.py,sha256=ivWJmEAihprKmwgaBGDJ4Koq4ETciE5GfDp86KHDnns,5892
-infrahub/proposed_change/tasks.py,sha256=
+infrahub/proposed_change/tasks.py,sha256=z2Pm8siPqsRUapHQ2yB--2eOPQ4Jee6X-oOlB7-7d6o,65661
 infrahub/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 infrahub/pytest_plugin.py,sha256=u3t0WgLMo9XmuQYeb28mccQ3xbnyv2Fv173YWl1zBiM,6678
 infrahub/schema/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -642,7 +643,7 @@ infrahub/services/adapters/cache/__init__.py,sha256=WsEthxQbcyCOA2M_FzfQr0Cymo4h
 infrahub/services/adapters/cache/nats.py,sha256=FLGqgDuqf1MD7PRxngiCA1jDdh307ypvaKEMK2P4oxU,5697
 infrahub/services/adapters/cache/redis.py,sha256=PKFdPncLI59w-KUaHSwgQm9VSJkO2h7xMh9xl-aID7I,2115
 infrahub/services/adapters/event/__init__.py,sha256=KUA6mW-9JF1haFu4D0G8CTETcR7y_yvpTg7avbQ0wgM,1372
-infrahub/services/adapters/http/__init__.py,sha256=
+infrahub/services/adapters/http/__init__.py,sha256=_IZUvukHSSd8TdgslW5hLGwQ6GNPYanpPX7aClSh3zM,686
 infrahub/services/adapters/http/httpx.py,sha256=jUPbxnjYZzWxk7fnFt2D4eSbd4JmiAGZFPk0Tz-Eyo0,3652
 infrahub/services/adapters/message_bus/__init__.py,sha256=CwhjOnRXJLYHfejG5vPAiz_8BEdvxTjgHAjNaIg8EtE,3180
 infrahub/services/adapters/message_bus/local.py,sha256=Ly-_c4CcNI6Q3pRiks5CuTwcpekQKEWWnyYVl1QRfpw,2359
@@ -650,7 +651,7 @@ infrahub/services/adapters/message_bus/nats.py,sha256=T_kESHII4pggoRytZe1xOgZoBo
 infrahub/services/adapters/message_bus/rabbitmq.py,sha256=x8KTt6dvV7cObpK1LkmCLfI-kRu7AWsGqxiou9JM_xw,10795
 infrahub/services/adapters/workflow/__init__.py,sha256=I9yA0lPJF4k3pznu6FJxsEVasgS4r7e5IsQqtpAXL84,1643
 infrahub/services/adapters/workflow/local.py,sha256=C8VgrgmxQOKpNN11Iili1EJ4VRR0yGNZDkO7fR2MSXg,1487
-infrahub/services/adapters/workflow/worker.py,sha256=
+infrahub/services/adapters/workflow/worker.py,sha256=zDDthMUKnIplTJLhT3wa_xjCohY7GUR3h7h8w298eJ0,3604
 infrahub/services/component.py,sha256=hPEHtFBIClLz8GNByKP4bABVZXN9CcfT__p0KQWnlWw,5688
 infrahub/services/protocols.py,sha256=Ci4cnWK6L_R_5V2qAPnQpHtKXYS0hktp7CoJWIbcbc0,754
 infrahub/services/scheduler.py,sha256=TbKg74oBINScHJYtV8_lOuQR2RjxqS6IfU_slyjpNYw,3246
@@ -659,7 +660,7 @@ infrahub/task_manager/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3h
 infrahub/task_manager/constants.py,sha256=1t1BZRa8_y89gIDPNHzIbRKo63nHOP37-r5OvtHa56c,559
 infrahub/task_manager/event.py,sha256=n9q62qWHuqE-sxNDq0WRTovoNlBGT6o_8bJOYMLDHqA,13885
 infrahub/task_manager/models.py,sha256=KoEEa7OZN3lxOgNgPWGQ_IK-yno27f3Q7k_zgVYkq2I,8541
-infrahub/task_manager/task.py,sha256=
+infrahub/task_manager/task.py,sha256=CAc7ZMZtQ1uniSnvtmVWSBb4kMOLJEzDob0xiKUDvTQ,15308
 infrahub/tasks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 infrahub/tasks/artifact.py,sha256=q1MyQAbT77pD-pm8StHsF_JlTpEQreNc51JHJfnsTD4,1958
 infrahub/tasks/check.py,sha256=37n1U1Knb3AV6kz2sw_IabL9pnlqceLVICWf9GdSxZE,687
@@ -699,14 +700,14 @@ infrahub/webhook/triggers.py,sha256=v1dzFV4wX0GO2n5hft_qzp-oJOA2P_9Q2eTcSP-i0pk,
 infrahub/worker.py,sha256=zV9vLXtJzyqeTGtVolwZEHlLaBvGiUZv00qWpE-lnOM,353
 infrahub/workers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 infrahub/workers/dependencies.py,sha256=7Zb1JHjNfNFjXnxrZxWZJE_axSCwwkfthttxfVoHcRY,5170
-infrahub/workers/infrahub_async.py,sha256=
+infrahub/workers/infrahub_async.py,sha256=Ne2Rhwp4UYiospU6OLHJNkRG0NHPbQW3VVF8R7LQiMY,8236
 infrahub/workers/utils.py,sha256=m6FOKrYo53Aoj-JcEyQ7-J4Dc20R9JtHMDzTcqXiRpg,2407
 infrahub/workflows/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 infrahub/workflows/catalogue.py,sha256=cHV51ZQXQhyoEgCahppv--vyDYbjo-Duu2LemeRho_k,18283
 infrahub/workflows/constants.py,sha256=7je2FF7tJH6x_ZNqHKZfQX91X7I5gmD8OECN3dE_eqI,651
 infrahub/workflows/initialization.py,sha256=Aj3tTD4tL9qhNgSnzFIMkTHab8FWWgL0JINaON9wjj0,3224
 infrahub/workflows/models.py,sha256=eCra3PFbX-GpiNLDG6WyqVXvSqRTz1eF4REm0doJFDY,3534
-infrahub/workflows/utils.py,sha256=
+infrahub/workflows/utils.py,sha256=nJ0K3FtIy-MG0O33h_p5ggU7rxF3Fdj5fIWJ1D7Jl7A,2833
 infrahub_sdk/__init__.py,sha256=weZAa06Ar0NO5IOKLQICtCceHUCKQxbkBxHebqQGJ1o,401
 infrahub_sdk/_importer.py,sha256=8oHTMxa_AMO_qbfb3UXNfjSr31S5YJTcqe-YMrixY_E,2257
 infrahub_sdk/analyzer.py,sha256=UDJN372vdAiuAv2TEyPUlsSVoUfZN6obWkIokNNaHbA,4148
@@ -830,8 +831,8 @@ infrahub_testcontainers/models.py,sha256=ASYyvl7d_WQz_i7y8-3iab9hwwmCl3OCJavqVbe
 infrahub_testcontainers/performance_test.py,sha256=hvwiy6tc_lWniYqGkqfOXVGAmA_IV15VOZqbiD9ezno,6149
 infrahub_testcontainers/plugin.py,sha256=I3RuZQ0dARyKHuqCf0y1Yj731P2Mwf3BJUehRJKeWrs,5645
 infrahub_testcontainers/prometheus.yml,sha256=610xQEyj3xuVJMzPkC4m1fRnCrjGpiRBrXA2ytCLa54,599
-infrahub_server-1.4.
-infrahub_server-1.4.
-infrahub_server-1.4.
-infrahub_server-1.4.
-infrahub_server-1.4.
+infrahub_server-1.4.13.dist-info/LICENSE.txt,sha256=7GQO7kxVoQYnZtFrjZBKLRXbrGwwwimHPPOJtqXsozQ,11340
+infrahub_server-1.4.13.dist-info/METADATA,sha256=sSvE6USFmBC_wjScED0h7u4IBm_XfDdI7eP3g2FBCDA,6259
+infrahub_server-1.4.13.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+infrahub_server-1.4.13.dist-info/entry_points.txt,sha256=UXIeFWDsrV-4IllNvUEd6KieYGzQfn9paga2YyABOQI,393
+infrahub_server-1.4.13.dist-info/RECORD,,
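
The hunk above replaces the 1.4.12 dist-info entries with their 1.4.13 counterparts; the manifest closes with "RECORD,," because RECORD cannot list its own hash or size, so both fields are left empty. A sketch of verifying every entry in a downloaded wheel against this manifest, using only the standard library (verify_wheel is a hypothetical helper, and the wheel filename in the usage comment assumes a local download):

    import base64
    import csv
    import hashlib
    import io
    import zipfile

    def verify_wheel(wheel_path: str, record_name: str) -> list[str]:
        """Return archive members whose contents do not match their RECORD entry."""
        mismatched: list[str] = []
        with zipfile.ZipFile(wheel_path) as wheel:
            record_text = wheel.read(record_name).decode("utf-8")
            for row in csv.reader(io.StringIO(record_text)):
                if len(row) != 3 or not row[1]:
                    continue  # skip blank rows and RECORD's own hash-less entry
                path, hash_spec, _size = row
                algorithm, _, expected = hash_spec.partition("=")
                digest = hashlib.new(algorithm, wheel.read(path)).digest()
                if base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii") != expected:
                    mismatched.append(path)
        return mismatched

    # verify_wheel("infrahub_server-1.4.13-py3-none-any.whl",
    #              "infrahub_server-1.4.13.dist-info/RECORD")
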
File without changes
File without changes
File without changes
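
A per-file change summary of the kind shown for this release can be reproduced by comparing the two manifests directly, since an entry's digest changes exactly when the file's bytes change. A sketch under the assumption that both RECORD files have been extracted locally (record_entries and summarize are hypothetical helpers):

    import csv

    def record_entries(record_path: str) -> dict[str, str]:
        # Map each packaged path to its hash field; skip malformed rows.
        with open(record_path, newline="") as fh:
            return {row[0]: row[1] for row in csv.reader(fh) if len(row) == 3}

    def summarize(old_record: str, new_record: str) -> None:
        old, new = record_entries(old_record), record_entries(new_record)
        for path in sorted(old.keys() | new.keys()):
            if path not in old:
                print(f"added:   {path}")
            elif path not in new:
                print(f"removed: {path}")
            elif old[path] != new[path]:
                print(f"changed: {path}")

    # summarize("infrahub_server-1.4.12.dist-info/RECORD",
    #           "infrahub_server-1.4.13.dist-info/RECORD")
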