infrahub-server 1.4.12__py3-none-any.whl → 1.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- infrahub/actions/tasks.py +208 -16
- infrahub/api/artifact.py +3 -0
- infrahub/api/diff/diff.py +1 -1
- infrahub/api/internal.py +2 -0
- infrahub/api/query.py +2 -0
- infrahub/api/schema.py +27 -3
- infrahub/auth.py +5 -5
- infrahub/cli/__init__.py +2 -0
- infrahub/cli/db.py +160 -157
- infrahub/cli/dev.py +118 -0
- infrahub/cli/tasks.py +46 -0
- infrahub/cli/upgrade.py +56 -9
- infrahub/computed_attribute/tasks.py +19 -7
- infrahub/config.py +7 -2
- infrahub/core/attribute.py +35 -24
- infrahub/core/branch/enums.py +1 -1
- infrahub/core/branch/models.py +9 -5
- infrahub/core/branch/needs_rebase_status.py +11 -0
- infrahub/core/branch/tasks.py +72 -10
- infrahub/core/changelog/models.py +2 -10
- infrahub/core/constants/__init__.py +4 -0
- infrahub/core/constants/infrahubkind.py +1 -0
- infrahub/core/convert_object_type/object_conversion.py +201 -0
- infrahub/core/convert_object_type/repository_conversion.py +89 -0
- infrahub/core/convert_object_type/schema_mapping.py +27 -3
- infrahub/core/diff/calculator.py +2 -2
- infrahub/core/diff/model/path.py +4 -0
- infrahub/core/diff/payload_builder.py +1 -1
- infrahub/core/diff/query/artifact.py +1 -0
- infrahub/core/diff/query/delete_query.py +9 -5
- infrahub/core/diff/query/field_summary.py +1 -0
- infrahub/core/diff/query/merge.py +39 -23
- infrahub/core/graph/__init__.py +1 -1
- infrahub/core/initialization.py +7 -4
- infrahub/core/manager.py +3 -81
- infrahub/core/migrations/__init__.py +3 -0
- infrahub/core/migrations/exceptions.py +4 -0
- infrahub/core/migrations/graph/__init__.py +13 -10
- infrahub/core/migrations/graph/load_schema_branch.py +21 -0
- infrahub/core/migrations/graph/m013_convert_git_password_credential.py +1 -1
- infrahub/core/migrations/graph/m037_index_attr_vals.py +11 -30
- infrahub/core/migrations/graph/m039_ipam_reconcile.py +9 -7
- infrahub/core/migrations/graph/m041_deleted_dup_edges.py +149 -0
- infrahub/core/migrations/graph/m042_profile_attrs_in_db.py +147 -0
- infrahub/core/migrations/graph/m043_create_hfid_display_label_in_db.py +164 -0
- infrahub/core/migrations/graph/m044_backfill_hfid_display_label_in_db.py +864 -0
- infrahub/core/migrations/query/__init__.py +7 -8
- infrahub/core/migrations/query/attribute_add.py +8 -6
- infrahub/core/migrations/query/attribute_remove.py +134 -0
- infrahub/core/migrations/runner.py +54 -0
- infrahub/core/migrations/schema/attribute_kind_update.py +9 -3
- infrahub/core/migrations/schema/attribute_supports_profile.py +90 -0
- infrahub/core/migrations/schema/node_attribute_add.py +26 -5
- infrahub/core/migrations/schema/node_attribute_remove.py +13 -109
- infrahub/core/migrations/schema/node_kind_update.py +2 -1
- infrahub/core/migrations/schema/node_remove.py +2 -1
- infrahub/core/migrations/schema/placeholder_dummy.py +3 -2
- infrahub/core/migrations/shared.py +66 -19
- infrahub/core/models.py +2 -2
- infrahub/core/node/__init__.py +207 -54
- infrahub/core/node/create.py +53 -49
- infrahub/core/node/lock_utils.py +124 -0
- infrahub/core/node/node_property_attribute.py +230 -0
- infrahub/core/node/resource_manager/ip_address_pool.py +2 -1
- infrahub/core/node/resource_manager/ip_prefix_pool.py +2 -1
- infrahub/core/node/resource_manager/number_pool.py +2 -1
- infrahub/core/node/standard.py +1 -1
- infrahub/core/property.py +11 -0
- infrahub/core/protocols.py +8 -1
- infrahub/core/query/attribute.py +82 -15
- infrahub/core/query/diff.py +61 -16
- infrahub/core/query/ipam.py +16 -4
- infrahub/core/query/node.py +92 -212
- infrahub/core/query/relationship.py +44 -26
- infrahub/core/query/subquery.py +0 -8
- infrahub/core/relationship/model.py +69 -24
- infrahub/core/schema/__init__.py +56 -0
- infrahub/core/schema/attribute_schema.py +4 -2
- infrahub/core/schema/basenode_schema.py +42 -2
- infrahub/core/schema/definitions/core/__init__.py +2 -0
- infrahub/core/schema/definitions/core/check.py +1 -1
- infrahub/core/schema/definitions/core/generator.py +2 -0
- infrahub/core/schema/definitions/core/group.py +16 -2
- infrahub/core/schema/definitions/core/repository.py +7 -0
- infrahub/core/schema/definitions/core/transform.py +1 -1
- infrahub/core/schema/definitions/internal.py +12 -3
- infrahub/core/schema/generated/attribute_schema.py +2 -2
- infrahub/core/schema/generated/base_node_schema.py +6 -1
- infrahub/core/schema/manager.py +3 -0
- infrahub/core/schema/node_schema.py +1 -0
- infrahub/core/schema/relationship_schema.py +0 -1
- infrahub/core/schema/schema_branch.py +295 -10
- infrahub/core/schema/schema_branch_display.py +135 -0
- infrahub/core/schema/schema_branch_hfid.py +120 -0
- infrahub/core/validators/aggregated_checker.py +1 -1
- infrahub/database/graph.py +21 -0
- infrahub/display_labels/__init__.py +0 -0
- infrahub/display_labels/gather.py +48 -0
- infrahub/display_labels/models.py +240 -0
- infrahub/display_labels/tasks.py +192 -0
- infrahub/display_labels/triggers.py +22 -0
- infrahub/events/branch_action.py +27 -1
- infrahub/events/group_action.py +1 -1
- infrahub/events/node_action.py +1 -1
- infrahub/generators/constants.py +7 -0
- infrahub/generators/models.py +38 -12
- infrahub/generators/tasks.py +34 -16
- infrahub/git/base.py +42 -2
- infrahub/git/integrator.py +22 -14
- infrahub/git/tasks.py +52 -2
- infrahub/graphql/analyzer.py +9 -0
- infrahub/graphql/api/dependencies.py +2 -4
- infrahub/graphql/api/endpoints.py +16 -6
- infrahub/graphql/app.py +2 -4
- infrahub/graphql/initialization.py +2 -3
- infrahub/graphql/manager.py +213 -137
- infrahub/graphql/middleware.py +12 -0
- infrahub/graphql/mutations/branch.py +16 -0
- infrahub/graphql/mutations/computed_attribute.py +110 -3
- infrahub/graphql/mutations/convert_object_type.py +44 -13
- infrahub/graphql/mutations/display_label.py +118 -0
- infrahub/graphql/mutations/generator.py +25 -7
- infrahub/graphql/mutations/hfid.py +125 -0
- infrahub/graphql/mutations/ipam.py +73 -41
- infrahub/graphql/mutations/main.py +61 -178
- infrahub/graphql/mutations/profile.py +195 -0
- infrahub/graphql/mutations/proposed_change.py +8 -1
- infrahub/graphql/mutations/relationship.py +2 -2
- infrahub/graphql/mutations/repository.py +22 -83
- infrahub/graphql/mutations/resource_manager.py +2 -2
- infrahub/graphql/mutations/webhook.py +1 -1
- infrahub/graphql/queries/resource_manager.py +1 -1
- infrahub/graphql/registry.py +173 -0
- infrahub/graphql/resolvers/resolver.py +2 -0
- infrahub/graphql/schema.py +8 -1
- infrahub/graphql/schema_sort.py +170 -0
- infrahub/graphql/types/branch.py +4 -1
- infrahub/graphql/types/enums.py +3 -0
- infrahub/groups/tasks.py +1 -1
- infrahub/hfid/__init__.py +0 -0
- infrahub/hfid/gather.py +48 -0
- infrahub/hfid/models.py +240 -0
- infrahub/hfid/tasks.py +191 -0
- infrahub/hfid/triggers.py +22 -0
- infrahub/lock.py +119 -42
- infrahub/locks/__init__.py +0 -0
- infrahub/locks/tasks.py +37 -0
- infrahub/message_bus/types.py +1 -0
- infrahub/patch/plan_writer.py +2 -2
- infrahub/permissions/constants.py +2 -0
- infrahub/profiles/__init__.py +0 -0
- infrahub/profiles/node_applier.py +101 -0
- infrahub/profiles/queries/__init__.py +0 -0
- infrahub/profiles/queries/get_profile_data.py +98 -0
- infrahub/profiles/tasks.py +63 -0
- infrahub/proposed_change/tasks.py +67 -14
- infrahub/repositories/__init__.py +0 -0
- infrahub/repositories/create_repository.py +113 -0
- infrahub/server.py +9 -1
- infrahub/services/__init__.py +8 -5
- infrahub/services/adapters/http/__init__.py +5 -0
- infrahub/services/adapters/workflow/worker.py +14 -3
- infrahub/task_manager/event.py +5 -0
- infrahub/task_manager/models.py +7 -0
- infrahub/task_manager/task.py +73 -0
- infrahub/tasks/registry.py +6 -4
- infrahub/trigger/catalogue.py +4 -0
- infrahub/trigger/models.py +2 -0
- infrahub/trigger/setup.py +13 -4
- infrahub/trigger/tasks.py +6 -0
- infrahub/webhook/models.py +1 -1
- infrahub/workers/dependencies.py +3 -1
- infrahub/workers/infrahub_async.py +10 -2
- infrahub/workflows/catalogue.py +118 -3
- infrahub/workflows/initialization.py +21 -0
- infrahub/workflows/models.py +17 -2
- infrahub/workflows/utils.py +2 -1
- infrahub_sdk/branch.py +17 -8
- infrahub_sdk/checks.py +1 -1
- infrahub_sdk/client.py +376 -95
- infrahub_sdk/config.py +29 -2
- infrahub_sdk/convert_object_type.py +61 -0
- infrahub_sdk/ctl/branch.py +3 -0
- infrahub_sdk/ctl/check.py +2 -3
- infrahub_sdk/ctl/cli_commands.py +20 -12
- infrahub_sdk/ctl/config.py +8 -2
- infrahub_sdk/ctl/generator.py +6 -3
- infrahub_sdk/ctl/graphql.py +184 -0
- infrahub_sdk/ctl/repository.py +39 -1
- infrahub_sdk/ctl/schema.py +40 -10
- infrahub_sdk/ctl/task.py +110 -0
- infrahub_sdk/ctl/utils.py +4 -0
- infrahub_sdk/ctl/validate.py +5 -3
- infrahub_sdk/diff.py +4 -5
- infrahub_sdk/exceptions.py +2 -0
- infrahub_sdk/generator.py +7 -1
- infrahub_sdk/graphql/__init__.py +12 -0
- infrahub_sdk/graphql/constants.py +1 -0
- infrahub_sdk/graphql/plugin.py +85 -0
- infrahub_sdk/graphql/query.py +77 -0
- infrahub_sdk/{graphql.py → graphql/renderers.py} +88 -75
- infrahub_sdk/graphql/utils.py +40 -0
- infrahub_sdk/node/attribute.py +2 -0
- infrahub_sdk/node/node.py +28 -20
- infrahub_sdk/node/relationship.py +1 -3
- infrahub_sdk/playback.py +1 -2
- infrahub_sdk/protocols.py +54 -6
- infrahub_sdk/pytest_plugin/plugin.py +7 -4
- infrahub_sdk/pytest_plugin/utils.py +40 -0
- infrahub_sdk/repository.py +1 -2
- infrahub_sdk/schema/__init__.py +70 -4
- infrahub_sdk/schema/main.py +1 -0
- infrahub_sdk/schema/repository.py +8 -0
- infrahub_sdk/spec/models.py +7 -0
- infrahub_sdk/spec/object.py +54 -6
- infrahub_sdk/spec/processors/__init__.py +0 -0
- infrahub_sdk/spec/processors/data_processor.py +10 -0
- infrahub_sdk/spec/processors/factory.py +34 -0
- infrahub_sdk/spec/processors/range_expand_processor.py +56 -0
- infrahub_sdk/spec/range_expansion.py +118 -0
- infrahub_sdk/task/models.py +6 -4
- infrahub_sdk/timestamp.py +18 -6
- infrahub_sdk/transforms.py +1 -1
- {infrahub_server-1.4.12.dist-info → infrahub_server-1.5.0.dist-info}/METADATA +9 -10
- {infrahub_server-1.4.12.dist-info → infrahub_server-1.5.0.dist-info}/RECORD +233 -176
- infrahub_testcontainers/container.py +114 -2
- infrahub_testcontainers/docker-compose-cluster.test.yml +5 -0
- infrahub_testcontainers/docker-compose.test.yml +5 -0
- infrahub_testcontainers/models.py +2 -2
- infrahub_testcontainers/performance_test.py +4 -4
- infrahub/core/convert_object_type/conversion.py +0 -134
- {infrahub_server-1.4.12.dist-info → infrahub_server-1.5.0.dist-info}/LICENSE.txt +0 -0
- {infrahub_server-1.4.12.dist-info → infrahub_server-1.5.0.dist-info}/WHEEL +0 -0
- {infrahub_server-1.4.12.dist-info → infrahub_server-1.5.0.dist-info}/entry_points.txt +0 -0
infrahub/core/diff/query/merge.py CHANGED

@@ -49,11 +49,18 @@ WITH node_diff_map, is_node_kind_migration, CASE
         WHEN is_node_kind_migration THEN $migrated_kinds_id_map[node_diff_map.uuid]
         ELSE NULL
     END AS node_db_id
+
+    // ------------------------------
+    // find the correct Node if the Node had its kind/inheritance migrated
+    // and there are multiple Nodes with the same UUID
+    // ------------------------------
     CALL (node_diff_map, node_db_id) {
-        MATCH (n:Node {uuid: node_diff_map.uuid})
-        WHERE node_db_id IS NULL
-
+        MATCH (n:Node {uuid: node_diff_map.uuid})-[n_is_part_of:IS_PART_OF]->(:Root)
+        WHERE node_db_id IS NULL OR %(id_func)s(n) = node_db_id
+        AND n_is_part_of.branch IN [$source_branch, $target_branch]
         RETURN n
+        ORDER BY n_is_part_of.branch_level DESC, n_is_part_of.from DESC, n_is_part_of.status ASC
+        LIMIT 1
     }
     WITH n, node_diff_map, is_node_kind_migration
     CALL (n, node_diff_map, is_node_kind_migration) {
@@ -224,16 +231,28 @@ CALL (n, node_diff_map, is_node_kind_migration) {
         ELSE NULL
     END AS rel_peer_db_id
     // ------------------------------
+    // find the correct relationship peer if the peer had its kind/inheritance migrated
+    // and there are multiple Nodes with the same UUID
+    // ------------------------------
+    CALL (rel_peer_id, rel_peer_db_id) {
+        MATCH (rel_peer:Node {uuid: rel_peer_id})-[target_is_part_of:IS_PART_OF]->(:Root)
+        WHERE (rel_peer_db_id IS NULL OR %(id_func)s(rel_peer) = rel_peer_db_id)
+        AND target_is_part_of.branch IN [$source_branch, $target_branch]
+        RETURN rel_peer
+        ORDER BY target_is_part_of.branch_level DESC, target_is_part_of.from DESC, target_is_part_of.status ASC
+        LIMIT 1
+    }
+    WITH rel_name, related_rel_status, rel_peer
+    // ------------------------------
     // determine the directions of each IS_RELATED
     // ------------------------------
-    CALL (n, rel_name,
+    CALL (n, rel_name, rel_peer, related_rel_status) {
         MATCH (n)
         -[source_r_rel_1:IS_RELATED]
         -(r:Relationship {name: rel_name})
         -[source_r_rel_2:IS_RELATED]
-        -(rel_peer
-        WHERE
-        AND source_r_rel_1.branch IN [$source_branch, $target_branch]
+        -(rel_peer)
+        WHERE source_r_rel_1.branch IN [$source_branch, $target_branch]
         AND source_r_rel_2.branch IN [$source_branch, $target_branch]
         AND source_r_rel_1.from <= $at AND source_r_rel_1.to IS NULL
         AND source_r_rel_2.from <= $at AND source_r_rel_2.to IS NULL
@@ -251,37 +270,34 @@ CALL (n, node_diff_map, is_node_kind_migration) {
         source_r_rel_1.hierarchy AS r1_hierarchy,
         source_r_rel_2.hierarchy AS r2_hierarchy
     }
-    WITH n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name,
-    CALL (n, rel_name,
+    WITH n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name, rel_peer, related_rel_status
+    CALL (n, rel_name, rel_peer, related_rel_status) {
         OPTIONAL MATCH (n)
         -[target_r_rel_1:IS_RELATED {branch: $target_branch, status: "active"}]
         -(:Relationship {name: rel_name})
         -[target_r_rel_2:IS_RELATED {branch: $target_branch, status: "active"}]
-        -(rel_peer
+        -(rel_peer)
         WHERE related_rel_status = "deleted"
-        AND (rel_peer_db_id IS NULL OR %(id_func)s(rel_peer) = rel_peer_db_id)
         AND target_r_rel_1.from <= $at AND target_r_rel_1.to IS NULL
         AND target_r_rel_2.from <= $at AND target_r_rel_2.to IS NULL
         SET target_r_rel_1.to = $at
         SET target_r_rel_2.to = $at
     }
-    WITH n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name,
+    WITH n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name, rel_peer, related_rel_status
     // ------------------------------
     // conditionally create new IS_RELATED relationships on target_branch, if necessary
     // ------------------------------
-    CALL (n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name,
-        MATCH (p:Node {uuid: rel_peer_id})
-        WHERE rel_peer_db_id IS NULL OR %(id_func)s(p) = rel_peer_db_id
+    CALL (n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name, rel_peer, related_rel_status) {
         OPTIONAL MATCH (n)
         -[r_rel_1:IS_RELATED {branch: $target_branch, status: related_rel_status}]
         -(:Relationship {name: rel_name})
         -[r_rel_2:IS_RELATED {branch: $target_branch, status: related_rel_status}]
-        -(
+        -(rel_peer)
         WHERE r_rel_1.from <= $at
         AND (r_rel_1.to >= $at OR r_rel_1.to IS NULL)
         AND r_rel_2.from <= $at
         AND (r_rel_2.to >= $at OR r_rel_2.to IS NULL)
-        WITH
+        WITH rel_peer, r_rel_1, r_rel_2
         WHERE r_rel_1 IS NULL
         AND r_rel_2 IS NULL
         // ------------------------------
@@ -301,19 +317,19 @@ CALL (n, node_diff_map, is_node_kind_migration) {
         <-[:IS_RELATED {branch: $target_branch, branch_level: $branch_level, from: $at, status: related_rel_status, hierarchy: r1_hierarchy}]
         -(r)
     }
-    CALL (r,
-    WITH r,
+    CALL (r, rel_peer, r2_dir, r2_hierarchy, related_rel_status) {
+        WITH r, rel_peer, r2_dir, r2_hierarchy, related_rel_status
         WHERE r2_dir = "r"
         CREATE (r)
         -[:IS_RELATED {branch: $target_branch, branch_level: $branch_level, from: $at, status: related_rel_status, hierarchy: r2_hierarchy}]
-        ->(
+        ->(rel_peer)
     }
-    CALL (r,
-    WITH r,
+    CALL (r, rel_peer, r2_dir, r2_hierarchy, related_rel_status) {
+        WITH r, rel_peer, r2_dir, r2_hierarchy, related_rel_status
         WHERE r2_dir = "l"
         CREATE (r)
         <-[:IS_RELATED {branch: $target_branch, branch_level: $branch_level, from: $at, status: related_rel_status, hierarchy: r2_hierarchy}]
-        -(
+        -(rel_peer)
     }
     }
 }
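The `%(id_func)s` placeholder in the query above is a Python %-style substitution that picks the database's node-id function before the Cypher is sent, while `$source_branch`, `$target_branch`, `$at`, and `$branch_level` remain regular query parameters. A minimal sketch of how such a template might be rendered, assuming Neo4j's `elementId()` with a plain `id()` fallback; the function and parameter values below are illustrative, not the package's actual API:

```python
# Illustrative rendering only; the real query construction lives inside the package.
# %(id_func)s is a Python %-format placeholder, $-prefixed names are Cypher parameters.
QUERY_TEMPLATE = """
MATCH (n:Node {uuid: node_diff_map.uuid})-[n_is_part_of:IS_PART_OF]->(:Root)
WHERE node_db_id IS NULL OR %(id_func)s(n) = node_db_id
AND n_is_part_of.branch IN [$source_branch, $target_branch]
RETURN n
ORDER BY n_is_part_of.branch_level DESC, n_is_part_of.from DESC, n_is_part_of.status ASC
LIMIT 1
"""


def render_query(db_type: str) -> str:
    # Assumption for illustration: Neo4j 5 identifies elements with elementId(),
    # while id() works as a generic fallback on other backends.
    id_func = "elementId" if db_type == "neo4j" else "id"
    return QUERY_TEMPLATE % {"id_func": id_func}


params = {  # example parameter values only
    "source_branch": "change-request-1",
    "target_branch": "main",
    "at": "2025-01-01T00:00:00.000000Z",
    "branch_level": 2,
}
print(render_query("neo4j"))
```
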
infrahub/core/graph/__init__.py CHANGED

@@ -1 +1 @@
-GRAPH_VERSION =
+GRAPH_VERSION = 44
infrahub/core/initialization.py CHANGED

@@ -34,7 +34,7 @@ from infrahub.database import InfrahubDatabase
 from infrahub.database.memgraph import IndexManagerMemgraph
 from infrahub.database.neo4j import IndexManagerNeo4j
 from infrahub.exceptions import DatabaseError
-from infrahub.graphql.manager import
+from infrahub.graphql.manager import registry as graphql_registry
 from infrahub.log import get_logger
 from infrahub.menu.utils import create_default_menu
 from infrahub.permissions import PermissionBackend, get_or_create_global_permission
@@ -50,7 +50,7 @@ async def get_root_node(db: InfrahubDatabase, initialize: bool = False) -> Root:
     roots = await Root.get_list(db=db)
     if len(roots) == 0 and not initialize:
         raise DatabaseError(
-            "The Database hasn't been initialized for Infrahub, please
+            "The Database hasn't been initialized for Infrahub, please 'infrahub server start' to initialize the database."
         )
 
     if len(roots) == 0:
@@ -137,7 +137,8 @@ async def add_indexes(db: InfrahubDatabase) -> None:
     await index_manager.add()
 
 
-async def initialization(db: InfrahubDatabase, add_database_indexes: bool = False) ->
+async def initialization(db: InfrahubDatabase, add_database_indexes: bool = False) -> bool:
+    """Run initialization and setup, returns a boolean to indicate if it's the initial setup."""
     if config.SETTINGS.database.db_type == config.DatabaseType.MEMGRAPH:
         session = await db.session()
         await session.run(query="SET DATABASE SETTING 'log.level' TO 'INFO'")
@@ -148,6 +149,7 @@ async def initialization(db: InfrahubDatabase, add_database_indexes: bool = Fals
     # Initialize the database and Load the Root node
     # ---------------------------------------------------
     async with lock.registry.initialization():
+        first_time_initialization = len(await Root.get_list(db=db)) == 0
         log.debug("Checking Root Node")
         await initialize_registry(db=db, initialize=True)
 
@@ -196,7 +198,7 @@ async def initialization(db: InfrahubDatabase, add_database_indexes: bool = Fals
 
     default_branch = registry.get_branch_from_registry(branch=registry.default_branch)
     schema_branch = registry.schema.get_schema_branch(name=default_branch.name)
-    gqlm =
+    gqlm = graphql_registry.get_manager_for_branch(branch=default_branch, schema_branch=schema_branch)
     gqlm.get_graphql_schema(
         include_query=True,
         include_mutation=True,
@@ -210,6 +212,7 @@ async def initialization(db: InfrahubDatabase, add_database_indexes: bool = Fals
     ip_namespace = await get_default_ipnamespace(db=db)
     if ip_namespace:
         registry.default_ipnamespace = ip_namespace.id
+    return first_time_initialization
 
 
 async def create_root_node(db: InfrahubDatabase) -> Root:
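With this change `initialization()` reports whether the Root node had to be created, so callers can run one-time setup only against a fresh database. A minimal caller sketch based only on the signature shown above; the `start_services` wrapper and the print are illustrative, not part of the package:

```python
# Hypothetical caller; only initialization() and its new bool return value come from the diff.
from infrahub.core.initialization import initialization
from infrahub.database import InfrahubDatabase


async def start_services(db: InfrahubDatabase) -> None:
    first_time = await initialization(db=db, add_database_indexes=True)
    if first_time:
        # One-time work (seeding defaults, registering triggers, ...) would go here.
        print("Fresh database detected: running first-time setup")
```
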
infrahub/core/manager.py CHANGED

@@ -1,7 +1,6 @@
 from __future__ import annotations
 
 from copy import copy
-from functools import reduce
 from typing import TYPE_CHECKING, Any, Iterable, Literal, TypeVar, overload
 
 from infrahub_sdk.utils import deep_merge_dict, is_valid_uuid
@@ -11,9 +10,7 @@ from infrahub.core.node import Node
 from infrahub.core.node.delete_validator import NodeDeleteValidator
 from infrahub.core.query.node import (
     AttributeFromDB,
-    AttributeNodePropertyFromDB,
     GroupedPeerNodes,
-    NodeAttributesFromDB,
     NodeGetHierarchyQuery,
     NodeGetListQuery,
     NodeListGetAttributeQuery,
@@ -78,60 +75,6 @@ def get_schema(
     return node_schema
 
 
-class ProfileAttributeIndex:
-    def __init__(
-        self,
-        profile_attributes_id_map: dict[str, NodeAttributesFromDB],
-        profile_ids_by_node_id: dict[str, list[str]],
-    ) -> None:
-        self._profile_attributes_id_map = profile_attributes_id_map
-        self._profile_ids_by_node_id = profile_ids_by_node_id
-
-    def apply_profiles(self, node_data_dict: dict[str, Any]) -> dict[str, Any]:
-        updated_data: dict[str, Any] = {**node_data_dict}
-        node_id = node_data_dict.get("id")
-        profile_ids = self._profile_ids_by_node_id.get(node_id, [])
-        if not profile_ids:
-            return updated_data
-        profiles = [
-            self._profile_attributes_id_map[p_id] for p_id in profile_ids if p_id in self._profile_attributes_id_map
-        ]
-
-        def get_profile_priority(nafd: NodeAttributesFromDB) -> tuple[int | float, str]:
-            try:
-                return (int(nafd.attrs.get("profile_priority").value), nafd.node.get("uuid"))
-            except (TypeError, AttributeError):
-                return (float("inf"), "")
-
-        profiles.sort(key=get_profile_priority)
-
-        for attr_name, attr_data in updated_data.items():
-            if not isinstance(attr_data, AttributeFromDB):
-                continue
-            if not attr_data.is_default:
-                continue
-            profile_value, profile_uuid = None, None
-            index = 0
-
-            while profile_value is None and index <= (len(profiles) - 1):
-                try:
-                    profile_value = profiles[index].attrs[attr_name].value
-                    if profile_value != "NULL":
-                        profile_uuid = profiles[index].node["uuid"]
-                        break
-                    profile_value = None
-                except (IndexError, KeyError, AttributeError):
-                    ...
-                index += 1
-
-            if profile_value is not None:
-                attr_data.value = profile_value
-                attr_data.is_from_profile = True
-                attr_data.is_default = False
-                attr_data.node_properties["source"] = AttributeNodePropertyFromDB(uuid=profile_uuid, labels=[])
-        return updated_data
-
-
 class NodeManager:
     @overload
     @classmethod
@@ -1132,21 +1075,11 @@
         )
         await query.execute(db=db)
         nodes_info_by_id: dict[str, NodeToProcess] = {node.node_uuid: node async for node in query.get_nodes(db=db)}
-        profile_ids_by_node_id = query.get_profile_ids_by_node_id()
-        all_profile_ids = reduce(
-            lambda all_ids, these_ids: all_ids | set(these_ids), profile_ids_by_node_id.values(), set()
-        )
-
-        if fields and all_profile_ids:
-            if "profile_priority" not in fields:
-                fields["profile_priority"] = {}
-            if "value" not in fields["profile_priority"]:
-                fields["profile_priority"]["value"] = None
 
         # Query list of all Attributes
         query = await NodeListGetAttributeQuery.init(
             db=db,
-            ids=list(nodes_info_by_id.keys())
+            ids=list(nodes_info_by_id.keys()),
             fields=fields,
             branch=branch,
             include_source=include_source,
@@ -1156,17 +1089,7 @@
             branch_agnostic=branch_agnostic,
         )
         await query.execute(db=db)
-
-        profile_attributes: dict[str, dict[str, AttributeFromDB]] = {}
-        node_attributes: dict[str, dict[str, AttributeFromDB]] = {}
-        for node_id, attribute_dict in all_node_attributes.items():
-            if node_id in all_profile_ids:
-                profile_attributes[node_id] = attribute_dict
-            else:
-                node_attributes[node_id] = attribute_dict
-        profile_index = ProfileAttributeIndex(
-            profile_attributes_id_map=profile_attributes, profile_ids_by_node_id=profile_ids_by_node_id
-        )
+        node_attributes = query.get_attributes_group_by_node()
 
         nodes: dict[str, Node] = {}
 
@@ -1195,11 +1118,10 @@
             for attr_name, attr in node_attributes[node_id].attrs.items():
                 new_node_data[attr_name] = attr
 
-            new_node_data_with_profile_overrides = profile_index.apply_profiles(new_node_data)
             node_class = identify_node_class(node=node)
             node_branch = await registry.get_branch(db=db, branch=node.branch)
             item = await node_class.init(schema=node.schema, branch=node_branch, at=at, db=db)
-            await item.load(**
+            await item.load(**new_node_data, db=db)
 
             nodes[node_id] = item
 
infrahub/core/migrations/__init__.py CHANGED

@@ -1,5 +1,6 @@
 from .schema.attribute_kind_update import AttributeKindUpdateMigration
 from .schema.attribute_name_update import AttributeNameUpdateMigration
+from .schema.attribute_supports_profile import AttributeSupportsProfileUpdateMigration
 from .schema.node_attribute_add import NodeAttributeAddMigration
 from .schema.node_attribute_remove import NodeAttributeRemoveMigration
 from .schema.node_kind_update import NodeKindUpdateMigration
@@ -19,6 +20,8 @@ MIGRATION_MAP: dict[str, type[SchemaMigration] | None] = {
     "attribute.name.update": AttributeNameUpdateMigration,
     "attribute.branch.update": None,
     "attribute.kind.update": AttributeKindUpdateMigration,
+    "attribute.optional.update": AttributeSupportsProfileUpdateMigration,
+    "attribute.read_only.update": AttributeSupportsProfileUpdateMigration,
     "relationship.branch.update": None,
     "relationship.direction.update": None,
     "relationship.identifier.update": PlaceholderDummyMigration,
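`MIGRATION_MAP` keys follow an `attribute.<field>.update` / `relationship.<field>.update` pattern, and a `None` value means the schema change needs no data migration; the two new entries route changes to an attribute's `optional` and `read_only` flags to `AttributeSupportsProfileUpdateMigration`. A small lookup sketch under those assumptions (the `migration_class_for` helper is illustrative, and the import path is inferred from the file list above):

```python
# Illustrative lookup only; assumes MIGRATION_MAP is the mapping shown in the diff above.
from infrahub.core.migrations import MIGRATION_MAP


def migration_class_for(schema_path: str) -> type | None:
    # None means the schema change is applied without any accompanying data migration.
    return MIGRATION_MAP.get(schema_path)


print(migration_class_for("attribute.optional.update"))  # AttributeSupportsProfileUpdateMigration
print(migration_class_for("attribute.branch.update"))    # None: nothing to migrate
```
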
infrahub/core/migrations/graph/__init__.py CHANGED

@@ -42,13 +42,16 @@ from .m037_index_attr_vals import Migration037
 from .m038_redo_0000_prefix_fix import Migration038
 from .m039_ipam_reconcile import Migration039
 from .m040_duplicated_attributes import Migration040
+from .m041_deleted_dup_edges import Migration041
+from .m042_profile_attrs_in_db import Migration042
+from .m043_create_hfid_display_label_in_db import Migration043
+from .m044_backfill_hfid_display_label_in_db import Migration044
 
 if TYPE_CHECKING:
-    from
+    from ..shared import MigrationTypes
 
-from ..shared import ArbitraryMigration, GraphMigration, InternalSchemaMigration
 
-MIGRATIONS: list[type[
+MIGRATIONS: list[type[MigrationTypes]] = [
     Migration001,
     Migration002,
     Migration003,
@@ -89,25 +92,25 @@ MIGRATIONS: list[type[GraphMigration | InternalSchemaMigration | ArbitraryMigrat
     Migration038,
     Migration039,
     Migration040,
+    Migration041,
+    Migration042,
+    Migration043,
+    Migration044,
 ]
 
 
-async def get_graph_migrations(
-    root: Root,
-) -> Sequence[GraphMigration | InternalSchemaMigration | ArbitraryMigration]:
+async def get_graph_migrations(current_graph_version: int) -> Sequence[MigrationTypes]:
     applicable_migrations = []
     for migration_class in MIGRATIONS:
         migration = migration_class.init()
-        if
+        if current_graph_version > migration.minimum_version:
             continue
         applicable_migrations.append(migration)
 
     return applicable_migrations
 
 
-def get_migration_by_number(
-    migration_number: int | str,
-) -> GraphMigration | InternalSchemaMigration | ArbitraryMigration:
+def get_migration_by_number(migration_number: int | str) -> MigrationTypes:
     # Convert to string and pad with zeros if needed
     try:
         num = int(migration_number)
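`get_graph_migrations()` now takes the database's current graph version and skips any migration whose `minimum_version` has already been passed, while `get_migration_by_number()` accepts an int or a zero-padded string. A hedged sketch of how these helpers might be driven; the `plan_upgrade` wrapper below is illustrative, not the package's upgrade command:

```python
# Illustrative driver only; the real upgrade flow lives in infrahub's CLI.
from infrahub.core.migrations.graph import get_graph_migrations, get_migration_by_number


async def plan_upgrade(current_graph_version: int) -> None:
    # Migrations whose minimum_version lies behind the current graph version are skipped.
    migrations = await get_graph_migrations(current_graph_version=current_graph_version)
    print(f"{len(migrations)} migration(s) pending for graph version {current_graph_version}")

    # A single migration can also be looked up by number, e.g. for a targeted re-run.
    migration = get_migration_by_number(44)
    print(f"Selected migration: {type(migration).__name__}")
```
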
infrahub/core/migrations/graph/load_schema_branch.py ADDED

@@ -0,0 +1,21 @@
+from infrahub.core import registry
+from infrahub.core.branch import Branch
+from infrahub.core.schema import SchemaRoot, internal_schema
+from infrahub.core.schema.manager import SchemaManager
+from infrahub.core.schema.schema_branch import SchemaBranch
+from infrahub.database import InfrahubDatabase
+from infrahub.exceptions import InitializationError
+
+
+async def get_or_load_schema_branch(db: InfrahubDatabase, branch: Branch) -> SchemaBranch:
+    try:
+        if registry.schema.has_schema_branch(branch.name):
+            return registry.schema.get_schema_branch(branch.name)
+    except InitializationError:
+        schema_manager = SchemaManager()
+        registry.schema = schema_manager
+        internal_schema_root = SchemaRoot(**internal_schema)
+        registry.schema.register_schema(schema=internal_schema_root)
+    schema_branch = await registry.schema.load_schema_from_db(db=db, branch=branch)
+    registry.schema.set_schema_branch(name=branch.name, schema=schema_branch)
+    return schema_branch
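A hypothetical usage sketch for the new helper: inside a graph migration it returns the branch's `SchemaBranch` from the registry when one is already loaded, and otherwise bootstraps a `SchemaManager` and loads the schema from the database. Only `get_or_load_schema_branch()` itself comes from the diff above; the surrounding migration step is illustrative:

```python
# Hypothetical migration step showing the intended call pattern.
from infrahub.core.branch import Branch
from infrahub.core.migrations.graph.load_schema_branch import get_or_load_schema_branch
from infrahub.core.schema.schema_branch import SchemaBranch
from infrahub.database import InfrahubDatabase


async def resolve_schema_for_migration(db: InfrahubDatabase, branch: Branch) -> SchemaBranch:
    # Uses the registry's copy when available, otherwise loads the schema from the
    # database, so a migration can run before normal startup has populated the registry.
    return await get_or_load_schema_branch(db=db, branch=branch)
```
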
infrahub/core/migrations/graph/m013_convert_git_password_credential.py CHANGED

@@ -286,7 +286,7 @@ class Migration013AddInternalStatusData(AttributeAddQuery):
         kwargs.pop("branch", None)
 
         super().__init__(
-
+            node_kinds=["CoreGenericRepository"],
             attribute_name="internal_status",
             attribute_kind="Dropdown",
             branch_support=BranchSupportType.LOCAL.value,
infrahub/core/migrations/graph/m037_index_attr_vals.py CHANGED

@@ -3,13 +3,10 @@ from __future__ import annotations
 from dataclasses import dataclass
 from typing import TYPE_CHECKING, Any
 
-from rich.console import Console
-
 from infrahub.constants.database import IndexType
 from infrahub.core.attribute import MAX_STRING_LENGTH
-from infrahub.core.migrations.shared import MigrationResult
+from infrahub.core.migrations.shared import MigrationResult, get_migration_console
 from infrahub.core.query import Query, QueryType
-from infrahub.core.timestamp import Timestamp
 from infrahub.database.index import IndexItem
 from infrahub.database.neo4j import IndexManagerNeo4j
 from infrahub.log import get_logger
@@ -467,13 +464,11 @@ class Migration037(ArbitraryMigration):
         return result
 
     async def execute(self, db: InfrahubDatabase) -> MigrationResult:  # noqa: PLR0915
-        console =
+        console = get_migration_console()
         result = MigrationResult()
 
         # find the active schema attributes that have a LARGE_ATTRIBUTE_TYPE kind on all branches
-        console.print(
-            f"{Timestamp().to_string()} Determining schema attribute types and timestamps on all branches...", end=""
-        )
+        console.print("Determining schema attribute types and timestamps on all branches...", end="")
         get_large_attribute_types_query = await GetLargeAttributeTypesQuery.init(db=db)
         await get_large_attribute_types_query.execute(db=db)
         schema_attribute_timeframes = get_large_attribute_types_query.get_large_attribute_type_timeframes()
@@ -481,10 +476,7 @@
 
         # find which schema attributes are large_types in the default branch, but updated to non-large_type on other branches
         # {(kind, attr_name): SchemaAttributeTimeframe}
-        console.print(
-            f"{Timestamp().to_string()} Determining which schema attributes have been updated to non-large_type on non-default branches...",
-            end="",
-        )
+        console.print("Determining schema attribute updates on non-default branches...", end="")
         main_schema_attribute_timeframes_map: dict[tuple[str, str], SchemaAttributeTimeframe] = {}
         for schema_attr_time in schema_attribute_timeframes:
             if schema_attr_time.is_default_branch:
@@ -508,7 +500,7 @@
         console.print("done")
 
         # drop the index on the AttributeValueNonIndexed vertex, there won't be any at this point anyway
-        console.print(
+        console.print("Dropping index on AttributeValueIndexed vertices...", end="")
         index_manager = IndexManagerNeo4j(db=db)
         index_manager.init(nodes=[AV_INDEXED_INDEX], rels=[])
         await index_manager.drop()
@@ -516,7 +508,7 @@
 
         # create the temporary non-indexed attribute value vertices for LARGE_ATTRIBUTE_TYPE attributes
         # start with default branch
-        console.print(
+        console.print("Creating temporary non-indexed attribute values for large attribute types...", end="")
         large_schema_attribute_timeframes = [
             schema_attr_time for schema_attr_time in schema_attribute_timeframes if schema_attr_time.is_large_type
         ]
@@ -528,10 +520,7 @@
         console.print("done")
 
         # re-index attribute values on branches where the type was updated to non-large_type
-        console.print(
-            f"{Timestamp().to_string()} Indexing attribute values on branches where the attribute schema was updated to a non-large_type...",
-            end="",
-        )
+        console.print("Re-indexing attribute values on branches updated to non-large types...", end="")
         for schema_attr_time in large_type_reverts:
             revert_non_index_on_branch_query = await RevertNonIndexOnBranchQuery.init(
                 db=db, schema_attribute_timeframe=schema_attr_time
@@ -540,27 +529,19 @@
         console.print("done")
 
         # set the AttributeValue vertices to be AttributeValueIndexed
-        console.print(
-            f"{Timestamp().to_string()} Update all AttributeValue vertices to add the AttributeValueIndexed label...",
-            end="",
-        )
+        console.print("Adding AttributeValueIndexed label to AttributeValue vertices...", end="")
         set_attribute_value_indexed_query = await SetAttributeValueIndexedQuery.init(db=db)
         await set_attribute_value_indexed_query.execute(db=db)
         console.print("done")
 
         # set AttributeValueNonIndexed vertices to just AttributeValue
-        console.print(
-            f"{Timestamp().to_string()} Update all AttributeValueNonIndexed vertices to be AttributeValue (no index)...",
-            end="",
-        )
+        console.print("Restoring AttributeValue label on AttributeValueNonIndexed vertices...", end="")
         finalize_attribute_value_non_indexed_query = await FinalizeAttributeValueNonIndexedQuery.init(db=db)
         await finalize_attribute_value_non_indexed_query.execute(db=db)
         console.print("done")
 
         # de-index all attribute values too large to be indexed
-        console.print(
-            f"{Timestamp().to_string()} De-index any legacy attribute data that is too large to be indexed...", end=""
-        )
+        console.print("De-indexing legacy attribute data exceeding index limits...", end="")
         de_index_large_attribute_values_query = await DeIndexLargeAttributeValuesQuery.init(
             db=db, max_value_size=MAX_STRING_LENGTH
         )
@@ -568,7 +549,7 @@
         console.print("done")
 
         # add the index back to the AttributeValueNonIndexed vertex
-        console.print(
+        console.print("Adding index back to the AttributeValueIndexed label...", end="")
         index_manager = IndexManagerNeo4j(db=db)
         index_manager.init(nodes=[AV_INDEXED_INDEX], rels=[])
         await index_manager.add()
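Both Migration037 and Migration039 (below) now obtain their console from `get_migration_console()` in `infrahub.core.migrations.shared` instead of constructing `rich.console.Console` directly, and the timestamped `f"{Timestamp().to_string()} ..."` prints become plain `console.print(...)` / `console.log(...)` calls. The helper's body is not part of this diff; the sketch below is only a guess at its general shape, purely for illustration:

```python
# Guessed shape of get_migration_console(); the real implementation lives in
# infrahub/core/migrations/shared.py and is not shown in this diff.
from rich.console import Console

_migration_console: Console | None = None


def get_migration_console() -> Console:
    """Return a shared rich Console for migration progress output."""
    global _migration_console
    if _migration_console is None:
        # console.log() prefixes each message with a timestamp, which replaces the
        # old pattern of embedding Timestamp().to_string() in every print().
        _migration_console = Console(log_path=False)
    return _migration_console
```
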
infrahub/core/migrations/graph/m039_ipam_reconcile.py CHANGED

@@ -4,14 +4,13 @@ import ipaddress
 from dataclasses import dataclass
 from typing import TYPE_CHECKING, Any
 
-from rich.console import Console
 from rich.progress import Progress
 
 from infrahub.core.branch.models import Branch
 from infrahub.core.constants import InfrahubKind
 from infrahub.core.initialization import initialization
 from infrahub.core.ipam.reconciler import IpamReconciler
-from infrahub.core.migrations.shared import MigrationResult
+from infrahub.core.migrations.shared import MigrationResult, get_migration_console
 from infrahub.core.query import Query, QueryType
 from infrahub.lock import initialize_lock
 from infrahub.log import get_logger
@@ -235,13 +234,13 @@ class Migration039(ArbitraryMigration):
         return MigrationResult()
 
     async def execute(self, db: InfrahubDatabase) -> MigrationResult:
-        console =
+        console = get_migration_console()
         result = MigrationResult()
         # load schemas from database into registry
         initialize_lock()
         await initialization(db=db)
 
-        console.print("Identifying IP prefixes
+        console.print("Identifying IP prefixes and addresses to reconcile...", end="")
         find_nodes_query = await FindNodesToReconcileQuery.init(db=db)
         await find_nodes_query.execute(db=db)
         console.print("done")
@@ -250,16 +249,17 @@
         # reconciler cannot correctly handle a prefix that is its own parent
         ip_node_details_list = find_nodes_query.get_nodes_to_reconcile()
         uuids_to_check = {ip_node_details.node_uuid for ip_node_details in ip_node_details_list}
-        console.
+        console.log(f"{len(ip_node_details_list)} IP prefixes or addresses will be reconciled.")
 
-        console.print("Deleting
+        console.print("Deleting self-parent relationships prior to reconciliation...", end="")
         delete_self_parent_relationships_query = await DeleteSelfParentRelationshipsQuery.init(
             db=db, uuids_to_check=list(uuids_to_check)
         )
         await delete_self_parent_relationships_query.execute(db=db)
         console.print("done")
 
-
+        console.log("Reconciling IP prefixes and addresses across branches...")
+        with Progress(console=console) as progress:
             reconcile_task = progress.add_task("Reconciling IP prefixes/addresses...", total=len(ip_node_details_list))
 
             for ip_node_details in ip_node_details_list:
@@ -271,4 +271,6 @@
                 )
                 progress.update(reconcile_task, advance=1)
 
+        console.log("IP prefix and address reconciliation complete.")
+
         return result