infrahub-server 1.1.8__py3-none-any.whl → 1.1.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- infrahub/config.py +6 -0
- infrahub/core/diff/query/merge.py +20 -17
- infrahub/core/graph/__init__.py +1 -1
- infrahub/core/migrations/__init__.py +1 -0
- infrahub/core/migrations/graph/__init__.py +4 -0
- infrahub/core/migrations/graph/m021_missing_hierarchy_merge.py +51 -0
- infrahub/core/migrations/graph/m022_missing_hierarchy_backfill.py +69 -0
- infrahub/core/migrations/query/node_duplicate.py +18 -21
- infrahub/core/models.py +15 -0
- infrahub/core/query/node.py +3 -0
- infrahub/core/validators/interface.py +6 -2
- infrahub/core/validators/model.py +2 -0
- infrahub/core/validators/node/hierarchy.py +1 -3
- infrahub/core/validators/node/inherit_from.py +25 -3
- infrahub/core/validators/tasks.py +8 -0
- infrahub/core/validators/uniqueness/checker.py +10 -9
- infrahub/database/__init__.py +9 -1
- infrahub/database/metrics.py +7 -1
- infrahub/graphql/initialization.py +3 -0
- infrahub/graphql/loaders/node.py +2 -12
- infrahub/graphql/loaders/peers.py +77 -0
- infrahub/graphql/loaders/shared.py +13 -0
- infrahub/graphql/resolvers/many_relationship.py +264 -0
- infrahub/graphql/resolvers/resolver.py +3 -103
- infrahub/graphql/subscription/graphql_query.py +2 -0
- {infrahub_server-1.1.8.dist-info → infrahub_server-1.1.10.dist-info}/METADATA +1 -1
- {infrahub_server-1.1.8.dist-info → infrahub_server-1.1.10.dist-info}/RECORD +32 -27
- infrahub_testcontainers/container.py +11 -0
- infrahub_testcontainers/docker-compose.test.yml +3 -6
- {infrahub_server-1.1.8.dist-info → infrahub_server-1.1.10.dist-info}/LICENSE.txt +0 -0
- {infrahub_server-1.1.8.dist-info → infrahub_server-1.1.10.dist-info}/WHEEL +0 -0
- {infrahub_server-1.1.8.dist-info → infrahub_server-1.1.10.dist-info}/entry_points.txt +0 -0
infrahub/config.py
CHANGED
@@ -249,6 +249,12 @@ class DatabaseSettings(BaseSettings):
     retry_limit: int = Field(
         default=3, description="Maximum number of times a transient issue in a transaction should be retried."
     )
+    max_concurrent_queries: int = Field(
+        default=0, ge=0, description="Maximum number of concurrent queries that can run (0 means unlimited)."
+    )
+    max_concurrent_queries_delay: float = Field(
+        default=0.01, ge=0, description="Delay to add when max_concurrent_queries is reached."
+    )
 
     @property
     def database_name(self) -> str:
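The two new DatabaseSettings fields let operators cap how many queries run against the database at once and control how long the driver waits before re-checking the connection pool (see the infrahub/database/__init__.py hunk further down). A minimal sketch of exercising these settings is shown below; the import path and the INFRAHUB_DB_ environment prefix are assumptions, while the field names and defaults come from the diff above.

# Minimal sketch, not Infrahub's own code: the import path and the
# INFRAHUB_DB_ env prefix are assumptions; field names/defaults are from the diff.
import os

from infrahub.config import DatabaseSettings

# Explicit construction: cap concurrency at 10 queries, re-check every 50 ms.
settings = DatabaseSettings(max_concurrent_queries=10, max_concurrent_queries_delay=0.05)
print(settings.max_concurrent_queries, settings.max_concurrent_queries_delay)

# Equivalent via environment variables, assuming pydantic-settings picks up the prefix.
os.environ["INFRAHUB_DB_MAX_CONCURRENT_QUERIES"] = "10"
os.environ["INFRAHUB_DB_MAX_CONCURRENT_QUERIES_DELAY"] = "0.05"
print(DatabaseSettings().max_concurrent_queries)

With the default of 0 the limiter is disabled, so existing deployments keep their current behaviour.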
infrahub/core/diff/query/merge.py
CHANGED

@@ -36,6 +36,7 @@ class DiffMergeQuery(Query):
             "target_branch": self.target_branch.name,
             "source_branch": self.source_branch_name,
         }
+        # ruff: noqa: E501
         query = """
 UNWIND $node_diff_dicts AS node_diff_map
 CALL {
@@ -242,9 +243,11 @@ CALL {
         CASE
             WHEN startNode(source_r_rel_2).uuid = r.uuid THEN "r"
             ELSE "l"
-        END AS r2_dir
+        END AS r2_dir,
+        source_r_rel_1.hierarchy AS r1_hierarchy,
+        source_r_rel_2.hierarchy AS r2_hierarchy
     }
-    WITH n, r, r1_dir, r2_dir, rel_name, rel_peer_id, related_rel_status
+    WITH n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name, rel_peer_id, related_rel_status
     CALL {
         WITH n, rel_name, rel_peer_id, related_rel_status
         OPTIONAL MATCH (n)
@@ -258,12 +261,12 @@ CALL {
         SET target_r_rel_1.to = $at
         SET target_r_rel_2.to = $at
     }
-    WITH n, r, r1_dir, r2_dir, rel_name, rel_peer_id, related_rel_status
+    WITH n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name, rel_peer_id, related_rel_status
     // ------------------------------
     // conditionally create new IS_RELATED relationships on target_branch, if necessary
     // ------------------------------
     CALL {
-        WITH n, r, r1_dir, r2_dir, rel_name, rel_peer_id, related_rel_status
+        WITH n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name, rel_peer_id, related_rel_status
         MATCH (p:Node {uuid: rel_peer_id})
         OPTIONAL MATCH (n)
             -[r_rel_1:IS_RELATED {branch: $target_branch, status: related_rel_status}]
@@ -274,42 +277,42 @@ CALL {
         AND (r_rel_1.to >= $at OR r_rel_1.to IS NULL)
         AND r_rel_2.from <= $at
         AND (r_rel_2.to >= $at OR r_rel_2.to IS NULL)
-        WITH n, r, r1_dir, r2_dir, p, related_rel_status, r_rel_1, r_rel_2
+        WITH n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, p, related_rel_status, r_rel_1, r_rel_2
         WHERE r_rel_1 IS NULL
         AND r_rel_2 IS NULL
         // ------------------------------
         // create IS_RELATED relationships with directions maintained from source
        // ------------------------------
        CALL {
-            WITH n, r, r1_dir, related_rel_status
-            WITH n, r, r1_dir, related_rel_status
+            WITH n, r, r1_dir, r1_hierarchy, related_rel_status
+            WITH n, r, r1_dir, r1_hierarchy, related_rel_status
            WHERE r1_dir = "r"
            CREATE (n)
-            -[:IS_RELATED {branch: $target_branch, branch_level: $branch_level, from: $at, status: related_rel_status}]
+            -[:IS_RELATED {branch: $target_branch, branch_level: $branch_level, from: $at, status: related_rel_status, hierarchy: r1_hierarchy}]
            ->(r)
        }
        CALL {
-            WITH n, r, r1_dir, related_rel_status
-            WITH n, r, r1_dir, related_rel_status
+            WITH n, r, r1_dir, r1_hierarchy, related_rel_status
+            WITH n, r, r1_dir, r1_hierarchy, related_rel_status
            WHERE r1_dir = "l"
            CREATE (n)
-            <-[:IS_RELATED {branch: $target_branch, branch_level: $branch_level, from: $at, status: related_rel_status}]
+            <-[:IS_RELATED {branch: $target_branch, branch_level: $branch_level, from: $at, status: related_rel_status, hierarchy: r1_hierarchy}]
            -(r)
        }
        CALL {
-            WITH r, p, r2_dir, related_rel_status
-            WITH r, p, r2_dir, related_rel_status
+            WITH r, p, r2_dir, r2_hierarchy, related_rel_status
+            WITH r, p, r2_dir, r2_hierarchy, related_rel_status
            WHERE r2_dir = "r"
            CREATE (r)
-            -[:IS_RELATED {branch: $target_branch, branch_level: $branch_level, from: $at, status: related_rel_status}]
+            -[:IS_RELATED {branch: $target_branch, branch_level: $branch_level, from: $at, status: related_rel_status, hierarchy: r2_hierarchy}]
            ->(p)
        }
        CALL {
-            WITH r, p, r2_dir, related_rel_status
-            WITH r, p, r2_dir, related_rel_status
+            WITH r, p, r2_dir, r2_hierarchy, related_rel_status
+            WITH r, p, r2_dir, r2_hierarchy, related_rel_status
            WHERE r2_dir = "l"
            CREATE (r)
-            <-[:IS_RELATED {branch: $target_branch, branch_level: $branch_level, from: $at, status: related_rel_status}]
+            <-[:IS_RELATED {branch: $target_branch, branch_level: $branch_level, from: $at, status: related_rel_status, hierarchy: r2_hierarchy}]
            -(p)
        }
    }
infrahub/core/graph/__init__.py
CHANGED
@@ -1 +1 @@
-GRAPH_VERSION =
+GRAPH_VERSION = 22
infrahub/core/migrations/__init__.py
CHANGED

@@ -13,6 +13,7 @@ MIGRATION_MAP: dict[str, Optional[type[SchemaMigration]]] = {
     "node.branch.update": None,
     "node.attribute.add": NodeAttributeAddMigration,
     "node.attribute.remove": NodeAttributeRemoveMigration,
+    "node.inherit_from.update": NodeKindUpdateMigration,
     "node.name.update": NodeKindUpdateMigration,
     "node.namespace.update": NodeKindUpdateMigration,
     "node.relationship.remove": PlaceholderDummyMigration,
infrahub/core/migrations/graph/__init__.py
CHANGED

@@ -22,6 +22,8 @@ from .m017_add_core_profile import Migration017
 from .m018_uniqueness_nulls import Migration018
 from .m019_restore_rels_to_time import Migration019
 from .m020_duplicate_edges import Migration020
+from .m021_missing_hierarchy_merge import Migration021
+from .m022_missing_hierarchy_backfill import Migration022
 
 if TYPE_CHECKING:
     from infrahub.core.root import Root
@@ -49,6 +51,8 @@ MIGRATIONS: list[type[Union[GraphMigration, InternalSchemaMigration, ArbitraryMi
     Migration018,
     Migration019,
     Migration020,
+    Migration021,
+    Migration022,
 ]
infrahub/core/migrations/graph/m021_missing_hierarchy_merge.py
ADDED

@@ -0,0 +1,51 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any, Sequence
+
+from infrahub.core.migrations.shared import GraphMigration, MigrationResult
+from infrahub.log import get_logger
+
+from ...query import Query, QueryType
+
+if TYPE_CHECKING:
+    from infrahub.database import InfrahubDatabase
+
+log = get_logger()
+
+
+class SetMissingHierarchyQuery(Query):
+    name = "set_missing_hierarchy"
+    type = QueryType.WRITE
+    insert_return = False
+
+    async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:
+        query = """
+        MATCH (r:Root)
+        WITH r.default_branch AS default_branch
+        MATCH (n:Node)-[main_e:IS_RELATED {branch: default_branch}]-(rel:Relationship)
+        WHERE main_e.hierarchy IS NULL
+        CALL {
+            WITH n, main_e, rel
+            MATCH (n)-[branch_e:IS_RELATED]-(rel)
+            WHERE branch_e.hierarchy IS NOT NULL
+            AND branch_e.branch <> main_e.branch
+            AND branch_e.from < main_e.from
+            SET main_e.hierarchy = branch_e.hierarchy
+        }
+        """
+        self.add_to_query(query)
+
+
+class Migration021(GraphMigration):
+    """
+    A bug in diff merge logic caused the hierarchy information on IS_RELATED edges to be lost when merged into
+    main. This migration sets the missing hierarchy data.
+    """
+
+    name: str = "021_replace_hierarchy"
+    minimum_version: int = 20
+    queries: Sequence[type[Query]] = [SetMissingHierarchyQuery]
+
+    async def validate_migration(self, db: InfrahubDatabase) -> MigrationResult:
+        result = MigrationResult()
+        return result
infrahub/core/migrations/graph/m022_missing_hierarchy_backfill.py
ADDED

@@ -0,0 +1,69 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any, Sequence
+
+from infrahub.core import registry
+from infrahub.core.initialization import initialization
+from infrahub.core.migrations.shared import GraphMigration, MigrationResult
+from infrahub.lock import initialize_lock
+from infrahub.log import get_logger
+
+from ...query import Query, QueryType
+
+if TYPE_CHECKING:
+    from infrahub.database import InfrahubDatabase
+
+log = get_logger()
+
+
+class BackfillMissingHierarchyQuery(Query):
+    name = "backfill_missing_hierarchy"
+    type = QueryType.WRITE
+    insert_return = False
+
+    async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:  # noqa: ARG002
+        # load schemas from database into registry
+        initialize_lock()
+        await initialization(db=db)
+        kind_hierarchy_map: dict[str, str] = {}
+        schema_branch = await registry.schema.load_schema_from_db(db=db)
+        for node_schema_kind in schema_branch.node_names:
+            node_schema = schema_branch.get_node(name=node_schema_kind, duplicate=False)
+            if node_schema.hierarchy:
+                kind_hierarchy_map[node_schema.kind] = node_schema.hierarchy
+
+        self.params = {"hierarchy_map": kind_hierarchy_map}
+        query = """
+        MATCH (r:Root)
+        WITH r.default_branch AS default_branch
+        MATCH (rel:Relationship {name: "parent__child"})-[e:IS_RELATED]-(n:Node)
+        WHERE e.hierarchy IS NULL
+        WITH DISTINCT rel, n, default_branch
+        CALL {
+            WITH rel, n, default_branch
+            MATCH (rel)-[e:IS_RELATED {branch: default_branch}]-(n)
+            RETURN e
+            ORDER BY e.from DESC
+            LIMIT 1
+        }
+        WITH rel, n, e
+        WHERE e.status = "active" AND e.hierarchy IS NULL
+        SET e.hierarchy = $hierarchy_map[n.kind]
+        """
+        self.add_to_query(query)
+
+
+class Migration022(GraphMigration):
+    """
+    A bug in diff merge logic caused the hierarchy information on IS_RELATED edges to be lost when merged into
+    main. This migration backfills the missing hierarchy data and accounts for the case when the branch that
+    created the data has been deleted.
+    """
+
+    name: str = "022_backfill_hierarchy"
+    minimum_version: int = 21
+    queries: Sequence[type[Query]] = [BackfillMissingHierarchyQuery]
+
+    async def validate_migration(self, db: InfrahubDatabase) -> MigrationResult:  # noqa: ARG002
+        result = MigrationResult()
+        return result
infrahub/core/migrations/query/node_duplicate.py
CHANGED

@@ -9,8 +9,6 @@ from infrahub.core.graph.schema import GraphNodeRelationships, GraphRelDirection
 from infrahub.core.query import Query, QueryType
 
 if TYPE_CHECKING:
-    from pydantic.fields import FieldInfo
-
     from infrahub.database import InfrahubDatabase
 
 
@@ -47,37 +45,37 @@ class NodeDuplicateQuery(Query):
         return query
 
     @staticmethod
-    def _render_sub_query_per_rel_type(
-        rel_name: str,
-        rel_type: str,
-        rel_def: FieldInfo,
-    ) -> str:
+    def _render_sub_query_per_rel_type(rel_name: str, rel_type: str, rel_dir: GraphRelDirection) -> str:
         subquery = [
             f"WITH peer_node, {rel_name}, active_node, new_node",
             f"WITH peer_node, {rel_name}, active_node, new_node",
             f'WHERE type({rel_name}) = "{rel_type}"',
         ]
-        if
+        if rel_dir in [GraphRelDirection.OUTBOUND, GraphRelDirection.EITHER]:
             subquery.append(f"""
            CREATE (new_node)-[new_active_edge:{rel_type} $rel_props_new ]->(peer_node)
            SET new_active_edge.branch = CASE WHEN {rel_name}.branch = "-global-" THEN "-global-" ELSE $branch END
            SET new_active_edge.branch_level = CASE WHEN {rel_name}.branch = "-global-" THEN {rel_name}.branch_level ELSE $branch_level END
+           SET new_active_edge.hierarchy = COALESCE({rel_name}.hierarchy, NULL)
            """)
             subquery.append(f"""
            CREATE (active_node)-[deleted_edge:{rel_type} $rel_props_prev ]->(peer_node)
            SET deleted_edge.branch = CASE WHEN {rel_name}.branch = "-global-" THEN "-global-" ELSE $branch END
            SET deleted_edge.branch_level = CASE WHEN {rel_name}.branch = "-global-" THEN {rel_name}.branch_level ELSE $branch_level END
+           SET deleted_edge.hierarchy = COALESCE({rel_name}.hierarchy, NULL)
            """)
-        elif
+        elif rel_dir in [GraphRelDirection.INBOUND, GraphRelDirection.EITHER]:
             subquery.append(f"""
            CREATE (new_node)<-[new_active_edge:{rel_type} $rel_props_new ]-(peer_node)
            SET new_active_edge.branch = CASE WHEN {rel_name}.branch = "-global-" THEN "-global-" ELSE $branch END
            SET new_active_edge.branch_level = CASE WHEN {rel_name}.branch = "-global-" THEN {rel_name}.branch_level ELSE $branch_level END
+           SET new_active_edge.hierarchy = COALESCE({rel_name}.hierarchy, NULL)
            """)
             subquery.append(f"""
            CREATE (active_node)<-[deleted_edge:{rel_type} $rel_props_prev ]-(peer_node)
-           SET
-           SET
+           SET deleted_edge.branch = CASE WHEN {rel_name}.branch = "-global-" THEN "-global-" ELSE $branch END
+           SET deleted_edge.branch_level = CASE WHEN {rel_name}.branch = "-global-" THEN {rel_name}.branch_level ELSE $branch_level END
+           SET deleted_edge.hierarchy = COALESCE({rel_name}.hierarchy, NULL)
            """)
         subquery.append("RETURN peer_node as p2")
         return "\n".join(subquery)
@@ -86,11 +84,10 @@ class NodeDuplicateQuery(Query):
     def _render_sub_query_out(cls) -> str:
         sub_queries_out = [
             cls._render_sub_query_per_rel_type(
-                rel_name="rel_outband",
-                rel_type=rel_type,
-                rel_def=rel_def,
+                rel_name="rel_outband", rel_type=rel_type, rel_dir=GraphRelDirection.OUTBOUND
             )
-            for rel_type,
+            for rel_type, field_info in GraphNodeRelationships.model_fields.items()
+            if field_info.default.direction in (GraphRelDirection.OUTBOUND, GraphRelDirection.EITHER)
         ]
         sub_query_out = "\nUNION\n".join(sub_queries_out)
         return sub_query_out
@@ -99,11 +96,10 @@ class NodeDuplicateQuery(Query):
     def _render_sub_query_in(cls) -> str:
         sub_queries_in = [
             cls._render_sub_query_per_rel_type(
-                rel_name="rel_inband",
-                rel_type=rel_type,
-                rel_def=rel_def,
+                rel_name="rel_inband", rel_type=rel_type, rel_dir=GraphRelDirection.INBOUND
             )
-            for rel_type,
+            for rel_type, field_info in GraphNodeRelationships.model_fields.items()
+            if field_info.default.direction in (GraphRelDirection.INBOUND, GraphRelDirection.EITHER)
         ]
         sub_query_in = "\nUNION\n".join(sub_queries_in)
         return sub_query_in
@@ -168,11 +164,12 @@ class NodeDuplicateQuery(Query):
     FOREACH (i in CASE WHEN rel_outband.branch IN ["-global-", $branch] THEN [1] ELSE [] END |
         SET rel_outband.to = $current_time
     )
-    WITH active_node, new_node
+    WITH DISTINCT active_node, new_node
+    // Process Inbound Relationship
     MATCH (active_node)<-[]-(peer)
     CALL {
         WITH active_node, peer
-        MATCH (active_node)
+        MATCH (active_node)<-[r]-(peer)
         WHERE %(branch_filter)s
         RETURN active_node as n1, r as rel_inband1, peer as p1
         ORDER BY r.branch_level DESC, r.from DESC
infrahub/core/models.py
CHANGED
@@ -19,6 +19,8 @@ if TYPE_CHECKING:
     from infrahub.core.schema import MainSchemaTypes
     from infrahub.core.schema.schema_branch import SchemaBranch
 
+GENERIC_ATTRIBUTES_TO_IGNORE = ["namespace", "name", "branch"]
+
 
 class NodeKind(BaseModel):
     namespace: str
@@ -182,6 +184,15 @@ class SchemaUpdateValidationResult(BaseModel):
 
         for schema_name, schema_diff in self.diff.changed.items():
             schema_node = schema.get(name=schema_name, duplicate=False)
+            if "inherit_from" in schema_diff.changed:
+                self.migrations.append(
+                    SchemaUpdateMigrationInfo(
+                        path=SchemaPath(  # type: ignore[call-arg]
+                            schema_kind=schema_name, path_type=SchemaPathType.NODE
+                        ),
+                        migration_name="node.inherit_from.update",
+                    )
+                )
 
             # Nothing to do today if we add a new attribute to a node in the schema
             # for node_field_name, _ in schema_diff.added.items():
@@ -262,6 +273,10 @@ class SchemaUpdateValidationResult(BaseModel):
         field_info = schema.model_fields[node_field_name]
         field_update = str(field_info.json_schema_extra.get("update"))  # type: ignore[union-attr]
 
+        # No need to execute a migration for generic nodes attributes because they are not stored in the database
+        if schema.is_generic_schema and node_field_name in GENERIC_ATTRIBUTES_TO_IGNORE:
+            return
+
         schema_path = SchemaPath(  # type: ignore[call-arg]
             schema_kind=schema.kind,
             path_type=SchemaPathType.NODE,
infrahub/core/query/node.py
CHANGED
@@ -666,18 +666,21 @@ class NodeListGetRelationshipsQuery(Query):
         MATCH paths_in = ((n)<-[r1:IS_RELATED]-(rel:Relationship)<-[r2:IS_RELATED]-(peer))
         WHERE ($relationship_identifiers IS NULL OR rel.name in $relationship_identifiers)
         AND all(r IN relationships(paths_in) WHERE (%(filters)s))
+        AND n.uuid <> peer.uuid
         RETURN n, rel, peer, r1, r2, "inbound" as direction
         UNION
         MATCH (n:Node) WHERE n.uuid IN $ids
         MATCH paths_out = ((n)-[r1:IS_RELATED]->(rel:Relationship)-[r2:IS_RELATED]->(peer))
         WHERE ($relationship_identifiers IS NULL OR rel.name in $relationship_identifiers)
         AND all(r IN relationships(paths_out) WHERE (%(filters)s))
+        AND n.uuid <> peer.uuid
         RETURN n, rel, peer, r1, r2, "outbound" as direction
         UNION
         MATCH (n:Node) WHERE n.uuid IN $ids
         MATCH paths_bidir = ((n)-[r1:IS_RELATED]->(rel:Relationship)<-[r2:IS_RELATED]-(peer))
         WHERE ($relationship_identifiers IS NULL OR rel.name in $relationship_identifiers)
         AND all(r IN relationships(paths_bidir) WHERE (%(filters)s))
+        AND n.uuid <> peer.uuid
         RETURN n, rel, peer, r1, r2, "bidirectional" as direction
         """ % {"filters": rels_filter}
 
infrahub/core/validators/interface.py
CHANGED

@@ -1,8 +1,12 @@
+from __future__ import annotations
+
 from abc import ABC, abstractmethod
+from typing import TYPE_CHECKING
 
-
+if TYPE_CHECKING:
+    from infrahub.core.path import GroupedDataPaths
 
-from .model import SchemaConstraintValidatorRequest
+    from .model import SchemaConstraintValidatorRequest
 
 
 class ConstraintCheckerInterface(ABC):
infrahub/core/validators/model.py
CHANGED

@@ -5,6 +5,7 @@ from pydantic import BaseModel, Field
 from infrahub.core.branch import Branch
 from infrahub.core.path import SchemaPath
 from infrahub.core.schema import GenericSchema, NodeSchema
+from infrahub.core.schema.schema_branch import SchemaBranch
 
 
 class SchemaConstraintValidatorRequest(BaseModel):
@@ -12,6 +13,7 @@ class SchemaConstraintValidatorRequest(BaseModel):
     constraint_name: str = Field(..., description="The name of the constraint to validate")
     node_schema: Union[NodeSchema, GenericSchema] = Field(..., description="Schema of Node or Generic to validate")
     schema_path: SchemaPath = Field(..., description="SchemaPath to the element of the schema to validate")
+    schema_branch: SchemaBranch = Field(..., description="SchemaBranch of the element to validate")
 
 
 class SchemaViolation(BaseModel):
infrahub/core/validators/node/hierarchy.py
CHANGED

@@ -7,9 +7,7 @@ from infrahub.core.path import DataPath, GroupedDataPaths
 from infrahub.core.schema import NodeSchema
 
 from ..interface import ConstraintCheckerInterface
-from ..shared import (
-    SchemaValidatorQuery,
-)
+from ..shared import SchemaValidatorQuery
 
 if TYPE_CHECKING:
     from infrahub.core.branch import Branch
infrahub/core/validators/node/inherit_from.py
CHANGED

@@ -6,7 +6,8 @@ from infrahub_sdk.utils import compare_lists
 
 from infrahub.core.constants import PathType
 from infrahub.core.path import DataPath, GroupedDataPaths
-from infrahub.core.schema import NodeSchema
+from infrahub.core.schema import MainSchemaTypes, NodeSchema
+from infrahub.exceptions import SchemaNotFoundError
 
 from ..interface import ConstraintCheckerInterface
 
@@ -41,8 +42,29 @@ class NodeInheritFromChecker(ConstraintCheckerInterface):
             return grouped_data_paths_list
 
         _, removed, _ = compare_lists(list1=current_schema.inherit_from, list2=request.node_schema.inherit_from)
-
-
+        current_inherit_from_ids = {
+            g.id: g.kind
+            for g in [
+                self.db.schema.get(name=n, branch=request.branch, duplicate=False) for n in current_schema.inherit_from
+            ]
+        }
+
+        # Gather IDs for each inherited node in use for candidate schema
+        request_inherited: list[MainSchemaTypes] = []
+        for n in request.node_schema.inherit_from:
+            try:
+                schema = request.schema_branch.get(name=n, duplicate=False)
+            except SchemaNotFoundError:
+                schema = self.db.schema.get(name=n, branch=request.branch, duplicate=False)
+            request_inherited.append(schema)
+        request_inherit_from_ids = {g.id: g.kind for g in request_inherited}
+
+        # Compare IDs to find out if some inherited nodes were removed
+        # Comparing IDs helps us in understanding if a node was renamed or really removed
+        _, removed_ids, _ = compare_lists(
+            list1=list(current_inherit_from_ids.keys()), list2=list(request_inherit_from_ids.keys())
+        )
+        if removed := [current_inherit_from_ids[k] for k in removed_ids]:
             group_data_path.add_data_path(
                 DataPath(
                     branch=str(request.branch.name),
infrahub/core/validators/tasks.py
CHANGED

@@ -1,5 +1,7 @@
 from __future__ import annotations
 
+from typing import TYPE_CHECKING
+
 from infrahub_sdk.batch import InfrahubBatch
 from prefect import flow, task
 from prefect.cache_policies import NONE
@@ -18,6 +20,9 @@ from infrahub.workflows.utils import add_tags
 
 from .models.validate_migration import SchemaValidateMigrationData, SchemaValidatorPathResponseData
 
+if TYPE_CHECKING:
+    from infrahub.core.schema.schema_branch import SchemaBranch
+
 
 @flow(name="schema_validate_migrations", flow_run_name="Validate schema migrations", persist_result=True)
 async def schema_validate_migrations(message: SchemaValidateMigrationData) -> list[SchemaValidatorPathResponseData]:
@@ -41,6 +46,7 @@ async def schema_validate_migrations(message: SchemaValidateMigrationData) -> list[SchemaValidatorPathResponseData]:
             constraint_name=constraint.constraint_name,
             node_schema=schema,
             schema_path=constraint.path,
+            schema_branch=message.schema_branch,
         )
 
     results = [result async for _, result in batch.execute()]
@@ -59,6 +65,7 @@ async def schema_path_validate(
     constraint_name: str,
     node_schema: NodeSchema | GenericSchema,
     schema_path: SchemaPath,
+    schema_branch: SchemaBranch,
 ) -> SchemaValidatorPathResponseData:
     service = services.service
 
@@ -68,6 +75,7 @@ async def schema_path_validate(
         constraint_name=constraint_name,
         node_schema=node_schema,
         schema_path=schema_path,
+        schema_branch=schema_branch,
     )
 
     component_registry = get_component_registry()
infrahub/core/validators/uniqueness/checker.py
CHANGED

@@ -1,21 +1,16 @@
+from __future__ import annotations
+
 import asyncio
 from itertools import chain
-from typing import Optional, Union
+from typing import TYPE_CHECKING, Optional, Union
 
 from infrahub.core import registry
 from infrahub.core.branch import Branch
 from infrahub.core.path import DataPath, GroupedDataPaths
-from infrahub.core.
-from infrahub.core.schema import (
-    AttributeSchema,
-    MainSchemaTypes,
-    RelationshipSchema,
-)
+from infrahub.core.schema import AttributeSchema, MainSchemaTypes, RelationshipSchema
 from infrahub.core.validators.uniqueness.index import UniquenessQueryResultsIndex
-from infrahub.database import InfrahubDatabase
 
 from ..interface import ConstraintCheckerInterface
-from ..model import SchemaConstraintValidatorRequest
 from .model import (
     NodeUniquenessQueryRequest,
     NonUniqueAttribute,
@@ -26,6 +21,12 @@ from .model import (
 )
 from .query import NodeUniqueAttributeConstraintQuery
 
+if TYPE_CHECKING:
+    from infrahub.core.query import QueryResult
+    from infrahub.database import InfrahubDatabase
+
+    from ..model import SchemaConstraintValidatorRequest
+
 
 def get_attribute_path_from_string(
     path: str, schema: MainSchemaTypes
infrahub/database/__init__.py
CHANGED
@@ -34,7 +34,7 @@ from infrahub.utils import InfrahubStringEnum
 
 from .constants import DatabaseType, Neo4jRuntime
 from .memgraph import DatabaseManagerMemgraph
-from .metrics import QUERY_EXECUTION_METRICS, TRANSACTION_RETRIES
+from .metrics import CONNECTION_POOL_USAGE, QUERY_EXECUTION_METRICS, TRANSACTION_RETRIES
 from .neo4j import DatabaseManagerNeo4j
 
 if TYPE_CHECKING:
@@ -335,6 +335,14 @@ class InfrahubDatabase:
         context: dict[str, str] | None = None,
         type: QueryType | None = None,  # pylint: disable=redefined-builtin
     ) -> tuple[list[Record], dict[str, Any]]:
+        connpool_usage = self._driver._pool.in_use_connection_count(self._driver._pool.address)
+        CONNECTION_POOL_USAGE.labels(self._driver._pool.address).set(float(connpool_usage))
+
+        if config.SETTINGS.database.max_concurrent_queries:
+            while connpool_usage > config.SETTINGS.database.max_concurrent_queries:  # noqa: ASYNC110
+                await asyncio.sleep(config.SETTINGS.database.max_concurrent_queries_delay)
+                connpool_usage = self._driver._pool.in_use_connection_count(self._driver._pool.address)
+
         with trace.get_tracer(__name__).start_as_current_span("execute_db_query_with_metadata") as span:
             span.set_attribute("query", query)
             if name:
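Taken together with the new DatabaseSettings fields, the block above implements a simple admission throttle: before each query it publishes the current number of in-use pool connections as a gauge and, if a cap is configured, sleeps in short increments until usage falls back under the cap. A standalone sketch of the same pattern is shown below; the Pool protocol and the concrete cap/delay values are illustrative placeholders rather than Infrahub APIs, since Infrahub reads them from config.SETTINGS.database.

# Standalone sketch of the throttling pattern above; the Pool protocol and the
# hard-coded values are placeholders, not Infrahub's own code.
import asyncio
from typing import Protocol


class Pool(Protocol):
    def in_use_connection_count(self) -> int: ...


async def wait_for_capacity(pool: Pool, max_concurrent: int, delay: float) -> None:
    """Block until the number of in-use connections is at or below the cap."""
    if not max_concurrent:  # 0 means unlimited, matching the setting's default
        return
    while pool.in_use_connection_count() > max_concurrent:
        await asyncio.sleep(delay)  # brief back-off before re-checking pool usage

Because the check happens before the query rather than inside the driver, the check-then-run sequence is not atomic, so short bursts can still exceed the cap slightly; the delay setting bounds how quickly waiting queries re-check.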
infrahub/database/metrics.py
CHANGED
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from prometheus_client import Counter, Histogram
+from prometheus_client import Counter, Gauge, Histogram
 
 METRIC_PREFIX = "infrahub_db"
 
@@ -16,3 +16,9 @@ TRANSACTION_RETRIES = Counter(
     "Number of transaction that have been retried due to transcient error",
     labelnames=["name"],
 )
+
+CONNECTION_POOL_USAGE = Gauge(
+    f"{METRIC_PREFIX}_last_connection_pool_usage",
+    "Number of last known active connections in the pool",
+    labelnames=["address"],
+)
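The new gauge is labelled by database address and updated just before each query runs (see the infrahub/database/__init__.py hunk above). A minimal, self-contained prometheus_client sketch of that usage pattern follows; only the metric definition mirrors the diff, while the address and value are placeholders.

# Minimal sketch of how the new gauge is exercised; the address and value are
# placeholders, only the metric definition mirrors the diff above.
from prometheus_client import Gauge, generate_latest

CONNECTION_POOL_USAGE = Gauge(
    "infrahub_db_last_connection_pool_usage",
    "Number of last known active connections in the pool",
    labelnames=["address"],
)

CONNECTION_POOL_USAGE.labels("bolt://localhost:7687").set(3.0)
print(generate_latest().decode())  # exposition text includes the labelled gauge sample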
infrahub/graphql/initialization.py
CHANGED

@@ -8,6 +8,7 @@ from starlette.background import BackgroundTasks
 from infrahub.core import registry
 from infrahub.core.timestamp import Timestamp
 from infrahub.exceptions import InitializationError
+from infrahub.graphql.resolvers.many_relationship import ManyRelationshipResolver
 from infrahub.graphql.resolvers.single_relationship import SingleRelationshipResolver
 from infrahub.permissions import PermissionManager
 
@@ -35,6 +36,7 @@ class GraphqlContext:
     branch: Branch
     types: dict
     single_relationship_resolver: SingleRelationshipResolver
+    many_relationship_resolver: ManyRelationshipResolver
     at: Timestamp | None = None
     related_node_ids: set | None = None
     service: InfrahubServices | None = None
@@ -107,6 +109,7 @@ async def prepare_graphql_params(
         db=db,
         branch=branch,
         single_relationship_resolver=SingleRelationshipResolver(),
+        many_relationship_resolver=ManyRelationshipResolver(),
         at=Timestamp(at),
         types=gqlm.get_graphql_types(),
         related_node_ids=set(),