infrahub-server 1.5.0b2__py3-none-any.whl → 1.5.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- infrahub/api/dependencies.py +4 -13
- infrahub/api/transformation.py +22 -20
- infrahub/cli/db.py +87 -65
- infrahub/cli/upgrade.py +27 -7
- infrahub/core/diff/calculator.py +2 -2
- infrahub/core/diff/query/delete_query.py +9 -5
- infrahub/core/diff/query/merge.py +39 -23
- infrahub/core/graph/__init__.py +1 -1
- infrahub/core/migrations/graph/__init__.py +5 -3
- infrahub/core/migrations/graph/m037_index_attr_vals.py +11 -30
- infrahub/core/migrations/graph/m039_ipam_reconcile.py +9 -7
- infrahub/core/migrations/graph/m041_deleted_dup_edges.py +149 -0
- infrahub/core/migrations/graph/{m041_profile_attrs_in_db.py → m042_profile_attrs_in_db.py} +10 -8
- infrahub/core/migrations/graph/{m042_create_hfid_display_label_in_db.py → m043_create_hfid_display_label_in_db.py} +6 -6
- infrahub/core/migrations/graph/{m043_backfill_hfid_display_label_in_db.py → m044_backfill_hfid_display_label_in_db.py} +9 -11
- infrahub/core/migrations/shared.py +14 -0
- infrahub/core/models.py +2 -2
- infrahub/core/node/__init__.py +26 -1
- infrahub/core/query/diff.py +61 -16
- infrahub/core/query/ipam.py +15 -4
- infrahub/core/query/node.py +42 -40
- infrahub/core/relationship/model.py +10 -5
- infrahub/core/schema/definitions/core/check.py +1 -1
- infrahub/core/schema/definitions/core/transform.py +1 -1
- infrahub/core/schema/schema_branch_display.py +12 -0
- infrahub/core/schema/schema_branch_hfid.py +6 -0
- infrahub/core/validators/uniqueness/checker.py +2 -1
- infrahub/database/__init__.py +0 -13
- infrahub/graphql/analyzer.py +9 -0
- infrahub/graphql/mutations/branch.py +5 -0
- infrahub/graphql/mutations/proposed_change.py +6 -0
- infrahub/message_bus/types.py +1 -0
- infrahub/profiles/queries/get_profile_data.py +4 -5
- infrahub/proposed_change/tasks.py +43 -9
- infrahub_sdk/analyzer.py +1 -1
- infrahub_sdk/batch.py +2 -2
- infrahub_sdk/branch.py +14 -2
- infrahub_sdk/checks.py +1 -1
- infrahub_sdk/client.py +2 -4
- infrahub_sdk/ctl/branch.py +3 -0
- infrahub_sdk/ctl/cli_commands.py +2 -0
- infrahub_sdk/ctl/exceptions.py +1 -1
- infrahub_sdk/ctl/task.py +110 -0
- infrahub_sdk/exceptions.py +18 -18
- infrahub_sdk/graphql/query.py +2 -2
- infrahub_sdk/node/attribute.py +1 -1
- infrahub_sdk/node/property.py +1 -1
- infrahub_sdk/node/related_node.py +3 -3
- infrahub_sdk/node/relationship.py +4 -6
- infrahub_sdk/object_store.py +2 -2
- infrahub_sdk/operation.py +1 -1
- infrahub_sdk/protocols_generator/generator.py +1 -1
- infrahub_sdk/pytest_plugin/exceptions.py +9 -9
- infrahub_sdk/pytest_plugin/items/base.py +1 -1
- infrahub_sdk/pytest_plugin/items/check.py +1 -1
- infrahub_sdk/pytest_plugin/items/python_transform.py +1 -1
- infrahub_sdk/repository.py +1 -1
- infrahub_sdk/schema/__init__.py +1 -1
- infrahub_sdk/spec/object.py +7 -3
- infrahub_sdk/task/exceptions.py +4 -4
- infrahub_sdk/task/manager.py +2 -2
- infrahub_sdk/task/models.py +6 -4
- infrahub_sdk/timestamp.py +1 -1
- infrahub_sdk/transfer/exporter/json.py +1 -1
- infrahub_sdk/transfer/importer/json.py +1 -1
- infrahub_sdk/transforms.py +1 -1
- {infrahub_server-1.5.0b2.dist-info → infrahub_server-1.5.1.dist-info}/METADATA +2 -2
- {infrahub_server-1.5.0b2.dist-info → infrahub_server-1.5.1.dist-info}/RECORD +75 -73
- infrahub_testcontainers/container.py +31 -5
- infrahub_testcontainers/helpers.py +19 -4
- infrahub_testcontainers/models.py +8 -6
- infrahub_testcontainers/performance_test.py +6 -4
- {infrahub_server-1.5.0b2.dist-info → infrahub_server-1.5.1.dist-info}/LICENSE.txt +0 -0
- {infrahub_server-1.5.0b2.dist-info → infrahub_server-1.5.1.dist-info}/WHEEL +0 -0
- {infrahub_server-1.5.0b2.dist-info → infrahub_server-1.5.1.dist-info}/entry_points.txt +0 -0
infrahub/core/diff/query/merge.py
CHANGED

@@ -49,11 +49,18 @@ WITH node_diff_map, is_node_kind_migration, CASE
     WHEN is_node_kind_migration THEN $migrated_kinds_id_map[node_diff_map.uuid]
     ELSE NULL
 END AS node_db_id
+
+// ------------------------------
+// find the correct Node if the Node had its kind/inheritance migrated
+// and there are multiple Nodes with the same UUID
+// ------------------------------
 CALL (node_diff_map, node_db_id) {
-    MATCH (n:Node {uuid: node_diff_map.uuid})
-    WHERE node_db_id IS NULL
-
+    MATCH (n:Node {uuid: node_diff_map.uuid})-[n_is_part_of:IS_PART_OF]->(:Root)
+    WHERE node_db_id IS NULL OR %(id_func)s(n) = node_db_id
+    AND n_is_part_of.branch IN [$source_branch, $target_branch]
     RETURN n
+    ORDER BY n_is_part_of.branch_level DESC, n_is_part_of.from DESC, n_is_part_of.status ASC
+    LIMIT 1
 }
 WITH n, node_diff_map, is_node_kind_migration
 CALL (n, node_diff_map, is_node_kind_migration) {
@@ -224,16 +231,28 @@ CALL (n, node_diff_map, is_node_kind_migration) {
     ELSE NULL
 END AS rel_peer_db_id
 // ------------------------------
+// find the correct relationship peer if the peer had its kind/inheritance migrated
+// and there are multiple Nodes with the same UUID
+// ------------------------------
+CALL (rel_peer_id, rel_peer_db_id) {
+    MATCH (rel_peer:Node {uuid: rel_peer_id})-[target_is_part_of:IS_PART_OF]->(:Root)
+    WHERE (rel_peer_db_id IS NULL OR %(id_func)s(rel_peer) = rel_peer_db_id)
+    AND target_is_part_of.branch IN [$source_branch, $target_branch]
+    RETURN rel_peer
+    ORDER BY target_is_part_of.branch_level DESC, target_is_part_of.from DESC, target_is_part_of.status ASC
+    LIMIT 1
+}
+WITH rel_name, related_rel_status, rel_peer
+// ------------------------------
 // determine the directions of each IS_RELATED
 // ------------------------------
-CALL (n, rel_name,
+CALL (n, rel_name, rel_peer, related_rel_status) {
     MATCH (n)
     -[source_r_rel_1:IS_RELATED]
     -(r:Relationship {name: rel_name})
     -[source_r_rel_2:IS_RELATED]
-    -(rel_peer
-    WHERE
-    AND source_r_rel_1.branch IN [$source_branch, $target_branch]
+    -(rel_peer)
+    WHERE source_r_rel_1.branch IN [$source_branch, $target_branch]
     AND source_r_rel_2.branch IN [$source_branch, $target_branch]
     AND source_r_rel_1.from <= $at AND source_r_rel_1.to IS NULL
     AND source_r_rel_2.from <= $at AND source_r_rel_2.to IS NULL
@@ -251,37 +270,34 @@ CALL (n, node_diff_map, is_node_kind_migration) {
     source_r_rel_1.hierarchy AS r1_hierarchy,
     source_r_rel_2.hierarchy AS r2_hierarchy
 }
-WITH n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name,
-CALL (n, rel_name,
+WITH n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name, rel_peer, related_rel_status
+CALL (n, rel_name, rel_peer, related_rel_status) {
     OPTIONAL MATCH (n)
     -[target_r_rel_1:IS_RELATED {branch: $target_branch, status: "active"}]
     -(:Relationship {name: rel_name})
     -[target_r_rel_2:IS_RELATED {branch: $target_branch, status: "active"}]
-    -(rel_peer
+    -(rel_peer)
     WHERE related_rel_status = "deleted"
-    AND (rel_peer_db_id IS NULL OR %(id_func)s(rel_peer) = rel_peer_db_id)
     AND target_r_rel_1.from <= $at AND target_r_rel_1.to IS NULL
     AND target_r_rel_2.from <= $at AND target_r_rel_2.to IS NULL
     SET target_r_rel_1.to = $at
     SET target_r_rel_2.to = $at
 }
-WITH n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name,
+WITH n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name, rel_peer, related_rel_status
 // ------------------------------
 // conditionally create new IS_RELATED relationships on target_branch, if necessary
 // ------------------------------
-CALL (n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name,
-    MATCH (p:Node {uuid: rel_peer_id})
-    WHERE rel_peer_db_id IS NULL OR %(id_func)s(p) = rel_peer_db_id
+CALL (n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name, rel_peer, related_rel_status) {
     OPTIONAL MATCH (n)
     -[r_rel_1:IS_RELATED {branch: $target_branch, status: related_rel_status}]
    -(:Relationship {name: rel_name})
     -[r_rel_2:IS_RELATED {branch: $target_branch, status: related_rel_status}]
-    -(
+    -(rel_peer)
     WHERE r_rel_1.from <= $at
     AND (r_rel_1.to >= $at OR r_rel_1.to IS NULL)
     AND r_rel_2.from <= $at
     AND (r_rel_2.to >= $at OR r_rel_2.to IS NULL)
-    WITH
+    WITH rel_peer, r_rel_1, r_rel_2
     WHERE r_rel_1 IS NULL
     AND r_rel_2 IS NULL
 // ------------------------------
@@ -301,19 +317,19 @@ CALL (n, node_diff_map, is_node_kind_migration) {
     <-[:IS_RELATED {branch: $target_branch, branch_level: $branch_level, from: $at, status: related_rel_status, hierarchy: r1_hierarchy}]
     -(r)
 }
-CALL (r,
-    WITH r,
+CALL (r, rel_peer, r2_dir, r2_hierarchy, related_rel_status) {
+    WITH r, rel_peer, r2_dir, r2_hierarchy, related_rel_status
     WHERE r2_dir = "r"
     CREATE (r)
     -[:IS_RELATED {branch: $target_branch, branch_level: $branch_level, from: $at, status: related_rel_status, hierarchy: r2_hierarchy}]
-    ->(
+    ->(rel_peer)
 }
-CALL (r,
-    WITH r,
+CALL (r, rel_peer, r2_dir, r2_hierarchy, related_rel_status) {
+    WITH r, rel_peer, r2_dir, r2_hierarchy, related_rel_status
     WHERE r2_dir = "l"
     CREATE (r)
     <-[:IS_RELATED {branch: $target_branch, branch_level: $branch_level, from: $at, status: related_rel_status, hierarchy: r2_hierarchy}]
-    -(
+    -(rel_peer)
 }
 }
 }
infrahub/core/graph/__init__.py
CHANGED
@@ -1 +1 @@
-GRAPH_VERSION =
+GRAPH_VERSION = 44
infrahub/core/migrations/graph/__init__.py
CHANGED

@@ -42,9 +42,10 @@ from .m037_index_attr_vals import Migration037
 from .m038_redo_0000_prefix_fix import Migration038
 from .m039_ipam_reconcile import Migration039
 from .m040_duplicated_attributes import Migration040
-from .
-from .
-from .
+from .m041_deleted_dup_edges import Migration041
+from .m042_profile_attrs_in_db import Migration042
+from .m043_create_hfid_display_label_in_db import Migration043
+from .m044_backfill_hfid_display_label_in_db import Migration044

 if TYPE_CHECKING:
     from ..shared import MigrationTypes
@@ -94,6 +95,7 @@ MIGRATIONS: list[type[MigrationTypes]] = [
     Migration041,
     Migration042,
     Migration043,
+    Migration044,
 ]

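The registry change follows the convention visible throughout this diff: each new migration module is imported, its class is appended to MIGRATIONS, and GRAPH_VERSION is bumped to match (44 here); in this release each migration's minimum_version is one less than its own number (041 uses 40, 042 uses 41, and so on). A hypothetical skeleton of the next migration in that chain, shown only to make the convention explicit; Migration045 and its module name do not exist in this package:

from __future__ import annotations

from typing import TYPE_CHECKING

from infrahub.core.migrations.shared import MigrationResult

from ..shared import ArbitraryMigration

if TYPE_CHECKING:
    from infrahub.database import InfrahubDatabase


class Migration045(ArbitraryMigration):  # hypothetical example, not part of this release
    name: str = "045_example_migration"  # illustrative name only
    minimum_version: int = 44  # the migrations above point this at the previous graph version

    async def validate_migration(self, db: InfrahubDatabase) -> MigrationResult:  # noqa: ARG002
        return MigrationResult()

    async def execute(self, db: InfrahubDatabase) -> MigrationResult:  # noqa: ARG002
        return MigrationResult()

Such a class would presumably also be imported in infrahub/core/migrations/graph/__init__.py, appended to MIGRATIONS, and accompanied by a GRAPH_VERSION bump to 45, mirroring the changes shown above.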
infrahub/core/migrations/graph/m037_index_attr_vals.py
CHANGED

@@ -3,13 +3,10 @@ from __future__ import annotations
 from dataclasses import dataclass
 from typing import TYPE_CHECKING, Any

-from rich.console import Console
-
 from infrahub.constants.database import IndexType
 from infrahub.core.attribute import MAX_STRING_LENGTH
-from infrahub.core.migrations.shared import MigrationResult
+from infrahub.core.migrations.shared import MigrationResult, get_migration_console
 from infrahub.core.query import Query, QueryType
-from infrahub.core.timestamp import Timestamp
 from infrahub.database.index import IndexItem
 from infrahub.database.neo4j import IndexManagerNeo4j
 from infrahub.log import get_logger
@@ -467,13 +464,11 @@ class Migration037(ArbitraryMigration):
         return result

     async def execute(self, db: InfrahubDatabase) -> MigrationResult:  # noqa: PLR0915
-        console =
+        console = get_migration_console()
         result = MigrationResult()

         # find the active schema attributes that have a LARGE_ATTRIBUTE_TYPE kind on all branches
-        console.print(
-            f"{Timestamp().to_string()} Determining schema attribute types and timestamps on all branches...", end=""
-        )
+        console.print("Determining schema attribute types and timestamps on all branches...", end="")
         get_large_attribute_types_query = await GetLargeAttributeTypesQuery.init(db=db)
         await get_large_attribute_types_query.execute(db=db)
         schema_attribute_timeframes = get_large_attribute_types_query.get_large_attribute_type_timeframes()
@@ -481,10 +476,7 @@ class Migration037(ArbitraryMigration):

         # find which schema attributes are large_types in the default branch, but updated to non-large_type on other branches
         # {(kind, attr_name): SchemaAttributeTimeframe}
-        console.print(
-            f"{Timestamp().to_string()} Determining which schema attributes have been updated to non-large_type on non-default branches...",
-            end="",
-        )
+        console.print("Determining schema attribute updates on non-default branches...", end="")
         main_schema_attribute_timeframes_map: dict[tuple[str, str], SchemaAttributeTimeframe] = {}
         for schema_attr_time in schema_attribute_timeframes:
             if schema_attr_time.is_default_branch:
@@ -508,7 +500,7 @@ class Migration037(ArbitraryMigration):
         console.print("done")

         # drop the index on the AttributeValueNonIndexed vertex, there won't be any at this point anyway
-        console.print(
+        console.print("Dropping index on AttributeValueIndexed vertices...", end="")
         index_manager = IndexManagerNeo4j(db=db)
         index_manager.init(nodes=[AV_INDEXED_INDEX], rels=[])
         await index_manager.drop()
@@ -516,7 +508,7 @@ class Migration037(ArbitraryMigration):

         # create the temporary non-indexed attribute value vertices for LARGE_ATTRIBUTE_TYPE attributes
         # start with default branch
-        console.print(
+        console.print("Creating temporary non-indexed attribute values for large attribute types...", end="")
         large_schema_attribute_timeframes = [
             schema_attr_time for schema_attr_time in schema_attribute_timeframes if schema_attr_time.is_large_type
         ]
@@ -528,10 +520,7 @@ class Migration037(ArbitraryMigration):
         console.print("done")

         # re-index attribute values on branches where the type was updated to non-large_type
-        console.print(
-            f"{Timestamp().to_string()} Indexing attribute values on branches where the attribute schema was updated to a non-large_type...",
-            end="",
-        )
+        console.print("Re-indexing attribute values on branches updated to non-large types...", end="")
         for schema_attr_time in large_type_reverts:
             revert_non_index_on_branch_query = await RevertNonIndexOnBranchQuery.init(
                 db=db, schema_attribute_timeframe=schema_attr_time
@@ -540,27 +529,19 @@ class Migration037(ArbitraryMigration):
         console.print("done")

         # set the AttributeValue vertices to be AttributeValueIndexed
-        console.print(
-            f"{Timestamp().to_string()} Update all AttributeValue vertices to add the AttributeValueIndexed label...",
-            end="",
-        )
+        console.print("Adding AttributeValueIndexed label to AttributeValue vertices...", end="")
         set_attribute_value_indexed_query = await SetAttributeValueIndexedQuery.init(db=db)
         await set_attribute_value_indexed_query.execute(db=db)
         console.print("done")

         # set AttributeValueNonIndexed vertices to just AttributeValue
-        console.print(
-            f"{Timestamp().to_string()} Update all AttributeValueNonIndexed vertices to be AttributeValue (no index)...",
-            end="",
-        )
+        console.print("Restoring AttributeValue label on AttributeValueNonIndexed vertices...", end="")
         finalize_attribute_value_non_indexed_query = await FinalizeAttributeValueNonIndexedQuery.init(db=db)
         await finalize_attribute_value_non_indexed_query.execute(db=db)
         console.print("done")

         # de-index all attribute values too large to be indexed
-        console.print(
-            f"{Timestamp().to_string()} De-index any legacy attribute data that is too large to be indexed...", end=""
-        )
+        console.print("De-indexing legacy attribute data exceeding index limits...", end="")
         de_index_large_attribute_values_query = await DeIndexLargeAttributeValuesQuery.init(
             db=db, max_value_size=MAX_STRING_LENGTH
         )
@@ -568,7 +549,7 @@ class Migration037(ArbitraryMigration):
         console.print("done")

         # add the index back to the AttributeValueNonIndexed vertex
-        console.print(
+        console.print("Adding index back to the AttributeValueIndexed label...", end="")
         index_manager = IndexManagerNeo4j(db=db)
         index_manager.init(nodes=[AV_INDEXED_INDEX], rels=[])
         await index_manager.add()

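Migration037, and the migrations below, now obtain their console from get_migration_console() instead of constructing a rich Console locally and hand-prefixing messages with Timestamp().to_string(). The helper itself is added to infrahub/core/migrations/shared.py (+14 lines), which is not shown in this diff, so the sketch below is only a guess at its shape: a memoized, process-wide Console that print, log and Progress can share.

# Hypothetical sketch; the real get_migration_console() in
# infrahub/core/migrations/shared.py is not included in this diff.
from functools import lru_cache

from rich.console import Console


@lru_cache(maxsize=1)
def get_migration_console() -> Console:
    # A single shared Console keeps console.print(), console.log() (which adds
    # its own timestamps) and Progress(console=...) rendering to one stream.
    return Console()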
infrahub/core/migrations/graph/m039_ipam_reconcile.py
CHANGED

@@ -4,14 +4,13 @@ import ipaddress
 from dataclasses import dataclass
 from typing import TYPE_CHECKING, Any

-from rich.console import Console
 from rich.progress import Progress

 from infrahub.core.branch.models import Branch
 from infrahub.core.constants import InfrahubKind
 from infrahub.core.initialization import initialization
 from infrahub.core.ipam.reconciler import IpamReconciler
-from infrahub.core.migrations.shared import MigrationResult
+from infrahub.core.migrations.shared import MigrationResult, get_migration_console
 from infrahub.core.query import Query, QueryType
 from infrahub.lock import initialize_lock
 from infrahub.log import get_logger
@@ -235,13 +234,13 @@ class Migration039(ArbitraryMigration):
         return MigrationResult()

     async def execute(self, db: InfrahubDatabase) -> MigrationResult:
-        console =
+        console = get_migration_console()
         result = MigrationResult()
         # load schemas from database into registry
         initialize_lock()
         await initialization(db=db)

-        console.print("Identifying IP prefixes
+        console.print("Identifying IP prefixes and addresses to reconcile...", end="")
         find_nodes_query = await FindNodesToReconcileQuery.init(db=db)
         await find_nodes_query.execute(db=db)
         console.print("done")
@@ -250,16 +249,17 @@ class Migration039(ArbitraryMigration):
         # reconciler cannot correctly handle a prefix that is its own parent
         ip_node_details_list = find_nodes_query.get_nodes_to_reconcile()
         uuids_to_check = {ip_node_details.node_uuid for ip_node_details in ip_node_details_list}
-        console.
+        console.log(f"{len(ip_node_details_list)} IP prefixes or addresses will be reconciled.")

-        console.print("Deleting
+        console.print("Deleting self-parent relationships prior to reconciliation...", end="")
         delete_self_parent_relationships_query = await DeleteSelfParentRelationshipsQuery.init(
             db=db, uuids_to_check=list(uuids_to_check)
         )
         await delete_self_parent_relationships_query.execute(db=db)
         console.print("done")

-
+        console.log("Reconciling IP prefixes and addresses across branches...")
+        with Progress(console=console) as progress:
             reconcile_task = progress.add_task("Reconciling IP prefixes/addresses...", total=len(ip_node_details_list))

             for ip_node_details in ip_node_details_list:
@@ -271,4 +271,6 @@ class Migration039(ArbitraryMigration):
                 )
                 progress.update(reconcile_task, advance=1)

+        console.log("IP prefix and address reconciliation complete.")
+
         return result

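Beyond the message wording, Migration039 now passes the shared console into rich's Progress (Progress(console=console)) and emits its before/after summaries through console.log. Routing both through one Console keeps log lines and the progress bar from interleaving badly on the terminal. A small self-contained illustration of that pattern; the item list and task names here are placeholders, not values from the package:

from rich.console import Console
from rich.progress import Progress

console = Console()
items = ["10.0.0.0/24", "10.0.1.0/24", "192.0.2.1"]  # placeholder data

console.log(f"{len(items)} IP prefixes or addresses will be reconciled.")
with Progress(console=console) as progress:
    task = progress.add_task("Reconciling IP prefixes/addresses...", total=len(items))
    for item in items:
        # real reconciliation work would happen here
        progress.update(task, advance=1)
console.log("IP prefix and address reconciliation complete.")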
infrahub/core/migrations/graph/m041_deleted_dup_edges.py
ADDED

@@ -0,0 +1,149 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any
+
+from rich import print as rprint
+
+from infrahub.core.branch import Branch
+from infrahub.core.diff.repository.repository import DiffRepository
+from infrahub.core.initialization import get_root_node
+from infrahub.core.migrations.shared import MigrationResult
+from infrahub.core.query import Query, QueryType
+from infrahub.dependencies.registry import build_component_registry, get_component_registry
+from infrahub.log import get_logger
+
+from ..shared import ArbitraryMigration
+
+if TYPE_CHECKING:
+    from infrahub.database import InfrahubDatabase
+
+log = get_logger()
+
+
+class DeletePosthumousEdges(Query):
+    name = "delete_posthumous_edges_query"
+    type = QueryType.WRITE
+    insert_return = False
+
+    async def query_init(self, db: InfrahubDatabase, **kwargs: Any) -> None:  # noqa: ARG002
+        query = """
+// ------------
+// find deleted nodes
+// ------------
+MATCH (n:Node)-[e:IS_PART_OF]->(:Root)
+WHERE e.status = "deleted" OR e.to IS NOT NULL
+WITH DISTINCT n, e.branch AS delete_branch, e.branch_level AS delete_branch_level, CASE
+    WHEN e.status = "deleted" THEN e.from
+    ELSE e.to
+END AS delete_time
+// ------------
+// find the edges added to the deleted node after the delete time
+// ------------
+MATCH (n)-[added_e]-(peer)
+WHERE added_e.from > delete_time
+AND type(added_e) <> "IS_PART_OF"
+// if the node was deleted on a branch (delete_branch_level > 1), and then updated on main/global (added_e.branch_level = 1), we can ignore it
+AND added_e.branch_level >= delete_branch_level
+AND (added_e.branch = delete_branch OR delete_branch_level = 1)
+WITH DISTINCT n, delete_branch, delete_time, added_e, peer
+// ------------
+// get the branched_from for the branch on which the node was deleted
+// ------------
+CALL (added_e) {
+    MATCH (b:Branch {name: added_e.branch})
+    RETURN b.branched_from AS added_e_branched_from
+}
+// ------------
+// account for the following situations, given that the edge update time is after the node delete time
+// - deleted on main/global, updated on branch
+//   - illegal if the delete is before branch.branched_from
+// - deleted on branch, updated on branch
+//   - illegal
+// ------------
+WITH n, delete_branch, delete_time, added_e, peer
+WHERE delete_branch = added_e.branch
+OR delete_time < added_e_branched_from
+DELETE added_e
+        """
+        self.add_to_query(query)
+
+
+class DeleteDuplicateEdgesForMigratedKindNodes(Query):
+    name = "delete_duplicate_edges_for_migrated_kind_nodes_query"
+    type = QueryType.WRITE
+    insert_return = False
+
+    async def query_init(self, db: InfrahubDatabase, **kwargs: Any) -> None:  # noqa: ARG002
+        query = """
+// ------------
+// get UUIDs for migrated kind/inheritance nodes
+// ------------
+MATCH (n:Node)
+WITH n.uuid AS node_uuid, count(*) AS num_nodes_with_uuid
+WHERE num_nodes_with_uuid > 1
+CALL (node_uuid) {
+    // ------------
+    // find any Relationships for these nodes
+    // ------------
+    MATCH (n:Node {uuid: node_uuid})-[:IS_RELATED]-(rel:Relationship)
+    WITH DISTINCT rel
+    MATCH (rel)-[e]->(peer)
+    WITH
+        type(e) AS e_type,
+        e.branch AS e_branch,
+        e.from AS e_from,
+        e.to AS e_to,
+        e.status AS e_status,
+        e.peer AS e_peer,
+        CASE
+            WHEN startNode(e) = rel THEN "out" ELSE "in"
+        END AS direction,
+        collect(e) AS duplicate_edges
+    WHERE size(duplicate_edges) > 1
+    WITH tail(duplicate_edges) AS duplicate_edges_to_delete
+    UNWIND duplicate_edges_to_delete AS edge_to_delete
+    DELETE edge_to_delete
+} IN TRANSACTIONS OF 500 ROWS
+        """
+        self.add_to_query(query)
+
+
+class Migration041(ArbitraryMigration):
+    """Clean up improper merges that duplicated edges to nodes with migrated kinds
+
+    - delete all existing diffs b/c they could contain incorrect nodes linking to deleted nodes with migrated kind/inheritance
+    - delete all edges added to any nodes AFTER they were deleted on main
+    - delete any duplicate edges touching migrated kind/inheritance nodes on main
+    """
+
+    name: str = "041_deleted_dup_edges"
+    minimum_version: int = 40
+
+    async def validate_migration(self, db: InfrahubDatabase) -> MigrationResult:  # noqa: ARG002
+        result = MigrationResult()
+
+        return result
+
+    async def execute(self, db: InfrahubDatabase) -> MigrationResult:
+        root_node = await get_root_node(db=db)
+        default_branch_name = root_node.default_branch
+        default_branch = await Branch.get_by_name(db=db, name=default_branch_name)
+
+        rprint("Deleting all diffs", end="...")
+        build_component_registry()
+        component_registry = get_component_registry()
+        diff_repo = await component_registry.get_component(DiffRepository, db=db, branch=default_branch)
+        await diff_repo.delete_all_diff_roots()
+        rprint("done")
+
+        rprint("Deleting edges merged after node deleted", end="...")
+        delete_posthumous_edges_query = await DeletePosthumousEdges.init(db=db)
+        await delete_posthumous_edges_query.execute(db=db)
+        rprint("done")
+
+        rprint("Deleting duplicate edges for migrated kind/inheritance nodes", end="...")
+        delete_duplicate_edges_query = await DeleteDuplicateEdgesForMigratedKindNodes.init(db=db)
+        await delete_duplicate_edges_query.execute(db=db)
+        rprint("done")
+
+        return MigrationResult()

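In DeleteDuplicateEdgesForMigratedKindNodes above, collect(e) groups edges that agree on type, branch, from, to, status, peer and direction, and tail(...) keeps the first edge of each group while deleting the rest. The same dedup rule restated in plain Python over hypothetical edge dictionaries, only to make the grouping key explicit (this is an illustration, not code from the package):

from collections import defaultdict


def duplicate_edges_to_delete(edges: list[dict]) -> list[dict]:
    """Return every edge except the first one of each identical group.

    `edges` are hypothetical dicts keyed like the Cypher grouping above:
    type, branch, from, to, status, peer, direction.
    """
    groups: dict[tuple, list[dict]] = defaultdict(list)
    for edge in edges:
        key = tuple(edge.get(k) for k in ("type", "branch", "from", "to", "status", "peer", "direction"))
        groups[key].append(edge)
    # tail(duplicate_edges): keep the first edge, flag the rest for deletion
    return [extra for group in groups.values() for extra in group[1:]]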
infrahub/core/migrations/graph/{m041_profile_attrs_in_db.py → m042_profile_attrs_in_db.py}
RENAMED

@@ -2,13 +2,12 @@ from __future__ import annotations

 from typing import TYPE_CHECKING, Any

-from rich.console import Console
 from rich.progress import Progress

 from infrahub.core.branch.models import Branch
 from infrahub.core.initialization import get_root_node
 from infrahub.core.manager import NodeManager
-from infrahub.core.migrations.shared import MigrationResult
+from infrahub.core.migrations.shared import MigrationResult, get_migration_console
 from infrahub.core.query import Query, QueryType
 from infrahub.core.timestamp import Timestamp
 from infrahub.log import get_logger
@@ -70,7 +69,7 @@ WITH DISTINCT node.uuid AS node_uuid
         return [result.get_as_type("node_uuid", str) for result in self.get_results()]


-class
+class Migration042(MigrationRequiringRebase):
     """
     Save profile attribute values on each node using the profile in the database
     For any profile that has updates on a given branch (including default branch)
@@ -79,8 +78,8 @@ class Migration041(MigrationRequiringRebase):
     - run NodeProfilesApplier.apply_profiles on the node on that branch
     """

-    name: str = "
-    minimum_version: int =
+    name: str = "042_profile_attrs_in_db"
+    minimum_version: int = 41

     def _get_profile_applier(self, db: InfrahubDatabase, branch: Branch) -> NodeProfilesApplier:
         return NodeProfilesApplier(db=db, branch=branch)
@@ -98,7 +97,7 @@ class Migration041(MigrationRequiringRebase):
             return await self._do_execute_for_branch(db=db, branch=branch)

     async def _do_execute_for_branch(self, db: InfrahubDatabase, branch: Branch) -> MigrationResult:
-        console =
+        console = get_migration_console()
         result = MigrationResult()
         await get_or_load_schema_branch(db=db, branch=branch)

@@ -111,7 +110,7 @@ class Migration041(MigrationRequiringRebase):
         console.print("done")

         node_ids_to_update: set[str] = set()
-        with Progress() as progress:
+        with Progress(console=console) as progress:
             gather_nodes_task = progress.add_task(
                 f"Gathering affected objects for each profile on branch {branch.name}...", total=len(profiles_map)
             )
@@ -121,6 +120,7 @@ class Migration041(MigrationRequiringRebase):
                 node_peers = await node_relationship_manager.get_db_peers(db=db)
                 node_ids_to_update.update(str(peer.peer_id) for peer in node_peers)
                 progress.update(gather_nodes_task, advance=1)
+        console.log(f"Collected nodes impacted by profiles on branch {branch.name}.")

         console.print("Identifying nodes with profile updates by branch...", end="")
         get_nodes_with_profile_updates_by_branch_query = await GetNodesWithProfileUpdatesForBranchQuery.init(
@@ -131,7 +131,8 @@ class Migration041(MigrationRequiringRebase):
         console.print("done")

         right_now = Timestamp()
-
+        console.log("Applying profiles to nodes...")
+        with Progress(console=console) as progress:
             apply_task = progress.add_task("Applying profiles to nodes...", total=len(node_ids_to_update))
             applier = self._get_profile_applier(db=db, branch=branch)
             for node_id in node_ids_to_update:
@@ -141,5 +142,6 @@ class Migration041(MigrationRequiringRebase):
                 if updated_field_names:
                     await node.save(db=db, fields=updated_field_names, at=right_now)
                 progress.update(apply_task, advance=1)
+        console.log("Completed applying profiles to nodes.")

         return result

infrahub/core/migrations/graph/{m042_create_hfid_display_label_in_db.py → m043_create_hfid_display_label_in_db.py}
RENAMED

@@ -9,7 +9,7 @@ from infrahub.core.branch import Branch
 from infrahub.core.constants import SchemaPathType
 from infrahub.core.initialization import get_root_node
 from infrahub.core.migrations.schema.node_attribute_add import NodeAttributeAddMigration
-from infrahub.core.migrations.shared import MigrationRequiringRebase, MigrationResult
+from infrahub.core.migrations.shared import MigrationRequiringRebase, MigrationResult, get_migration_console
 from infrahub.core.path import SchemaPath
 from infrahub.core.query import Query, QueryType

@@ -44,9 +44,9 @@ WITH n.kind AS kind, collect(n.uuid) AS node_ids
         return node_ids_by_kind


-class
-    name: str = "
-    minimum_version: int =
+class Migration043(MigrationRequiringRebase):
+    name: str = "043_create_hfid_display_label_in_db"
+    minimum_version: int = 42

     async def execute(self, db: InfrahubDatabase) -> MigrationResult:
         result = MigrationResult()
@@ -97,7 +97,7 @@ class Migration042(MigrationRequiringRebase):
             ]
         )

-        with Progress() as progress:
+        with Progress(console=get_migration_console()) as progress:
             update_task = progress.add_task("Adding HFID and display label to nodes", total=len(migrations))

             for migration in migrations:
@@ -144,7 +144,7 @@ class Migration042(MigrationRequiringRebase):
             ]
         )

-        with Progress() as progress:
+        with Progress(console=get_migration_console()) as progress:
             update_task = progress.add_task(
                 f"Adding HFID and display label to nodes on branch {branch.name}", total=len(migrations)
             )

infrahub/core/migrations/graph/{m043_backfill_hfid_display_label_in_db.py → m044_backfill_hfid_display_label_in_db.py}
RENAMED

@@ -11,7 +11,7 @@ from infrahub.core import registry
 from infrahub.core.branch import Branch
 from infrahub.core.constants import GLOBAL_BRANCH_NAME, BranchSupportType, RelationshipDirection
 from infrahub.core.initialization import get_root_node
-from infrahub.core.migrations.shared import MigrationResult
+from infrahub.core.migrations.shared import MigrationResult, get_migration_console
 from infrahub.core.query import Query, QueryType
 from infrahub.types import is_large_attribute_type

@@ -25,6 +25,9 @@ if TYPE_CHECKING:
     from infrahub.database import InfrahubDatabase


+console = get_migration_console()
+
+
 class DefaultBranchNodeCount(Query):
     """
     Get the number of Node vertices on the given branches that are not in the kinds_to_skip list
@@ -599,22 +602,17 @@ CALL (n, attr) {
         self.add_to_query(set_value_query)


-class
+class Migration044(MigrationRequiringRebase):
     """
     Backfill `human_friendly_id` and `display_label` attributes for nodes with schemas that define them.
     """

-    name: str = "
-    minimum_version: int =
+    name: str = "044_backfill_hfid_display_label_in_db"
+    minimum_version: int = 43
     update_batch_size: int = 1000
     # skip these b/c the attributes on these schema-related nodes are used to define the values included in
     # the human_friendly_id and display_label attributes on instances of these schema, so should not be updated
-    kinds_to_skip: list[str] = [
-        "SchemaNode",
-        "SchemaAttribute",
-        "SchemaRelationship",
-        "SchemaGeneric",
-    ]
+    kinds_to_skip: list[str] = ["SchemaNode", "SchemaAttribute", "SchemaRelationship", "SchemaGeneric"]

     async def validate_migration(self, db: InfrahubDatabase) -> MigrationResult:  # noqa: ARG002
         return MigrationResult()
@@ -721,7 +719,7 @@ class Migration043(MigrationRequiringRebase):
         hfid_attribute_schema = base_node_schema.get_attribute("human_friendly_id")

         try:
-            with Progress() as progress:
+            with Progress(console=console) as progress:
                 update_task = progress.add_task(
                     f"Set display_label and human_friendly_id for {total_nodes_count} nodes on default branch",
                     total=total_nodes_count,