infrahub-server 1.5.0b2__py3-none-any.whl → 1.5.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- infrahub/api/dependencies.py +4 -13
- infrahub/api/transformation.py +22 -20
- infrahub/cli/db.py +87 -65
- infrahub/cli/upgrade.py +27 -7
- infrahub/core/diff/calculator.py +2 -2
- infrahub/core/diff/query/delete_query.py +9 -5
- infrahub/core/diff/query/merge.py +39 -23
- infrahub/core/graph/__init__.py +1 -1
- infrahub/core/migrations/graph/__init__.py +5 -3
- infrahub/core/migrations/graph/m037_index_attr_vals.py +11 -30
- infrahub/core/migrations/graph/m039_ipam_reconcile.py +9 -7
- infrahub/core/migrations/graph/m041_deleted_dup_edges.py +149 -0
- infrahub/core/migrations/graph/{m041_profile_attrs_in_db.py → m042_profile_attrs_in_db.py} +10 -8
- infrahub/core/migrations/graph/{m042_create_hfid_display_label_in_db.py → m043_create_hfid_display_label_in_db.py} +6 -6
- infrahub/core/migrations/graph/{m043_backfill_hfid_display_label_in_db.py → m044_backfill_hfid_display_label_in_db.py} +9 -11
- infrahub/core/migrations/shared.py +14 -0
- infrahub/core/models.py +2 -2
- infrahub/core/node/__init__.py +26 -1
- infrahub/core/query/diff.py +61 -16
- infrahub/core/query/ipam.py +15 -4
- infrahub/core/query/node.py +42 -40
- infrahub/core/relationship/model.py +10 -5
- infrahub/core/schema/definitions/core/check.py +1 -1
- infrahub/core/schema/definitions/core/transform.py +1 -1
- infrahub/core/schema/schema_branch_display.py +12 -0
- infrahub/core/schema/schema_branch_hfid.py +6 -0
- infrahub/core/validators/uniqueness/checker.py +2 -1
- infrahub/database/__init__.py +0 -13
- infrahub/graphql/analyzer.py +9 -0
- infrahub/graphql/mutations/branch.py +5 -0
- infrahub/graphql/mutations/proposed_change.py +6 -0
- infrahub/message_bus/types.py +1 -0
- infrahub/profiles/queries/get_profile_data.py +4 -5
- infrahub/proposed_change/tasks.py +43 -9
- infrahub_sdk/analyzer.py +1 -1
- infrahub_sdk/batch.py +2 -2
- infrahub_sdk/branch.py +14 -2
- infrahub_sdk/checks.py +1 -1
- infrahub_sdk/client.py +2 -4
- infrahub_sdk/ctl/branch.py +3 -0
- infrahub_sdk/ctl/cli_commands.py +2 -0
- infrahub_sdk/ctl/exceptions.py +1 -1
- infrahub_sdk/ctl/task.py +110 -0
- infrahub_sdk/exceptions.py +18 -18
- infrahub_sdk/graphql/query.py +2 -2
- infrahub_sdk/node/attribute.py +1 -1
- infrahub_sdk/node/property.py +1 -1
- infrahub_sdk/node/related_node.py +3 -3
- infrahub_sdk/node/relationship.py +4 -6
- infrahub_sdk/object_store.py +2 -2
- infrahub_sdk/operation.py +1 -1
- infrahub_sdk/protocols_generator/generator.py +1 -1
- infrahub_sdk/pytest_plugin/exceptions.py +9 -9
- infrahub_sdk/pytest_plugin/items/base.py +1 -1
- infrahub_sdk/pytest_plugin/items/check.py +1 -1
- infrahub_sdk/pytest_plugin/items/python_transform.py +1 -1
- infrahub_sdk/repository.py +1 -1
- infrahub_sdk/schema/__init__.py +1 -1
- infrahub_sdk/spec/object.py +7 -3
- infrahub_sdk/task/exceptions.py +4 -4
- infrahub_sdk/task/manager.py +2 -2
- infrahub_sdk/task/models.py +6 -4
- infrahub_sdk/timestamp.py +1 -1
- infrahub_sdk/transfer/exporter/json.py +1 -1
- infrahub_sdk/transfer/importer/json.py +1 -1
- infrahub_sdk/transforms.py +1 -1
- {infrahub_server-1.5.0b2.dist-info → infrahub_server-1.5.1.dist-info}/METADATA +2 -2
- {infrahub_server-1.5.0b2.dist-info → infrahub_server-1.5.1.dist-info}/RECORD +75 -73
- infrahub_testcontainers/container.py +31 -5
- infrahub_testcontainers/helpers.py +19 -4
- infrahub_testcontainers/models.py +8 -6
- infrahub_testcontainers/performance_test.py +6 -4
- {infrahub_server-1.5.0b2.dist-info → infrahub_server-1.5.1.dist-info}/LICENSE.txt +0 -0
- {infrahub_server-1.5.0b2.dist-info → infrahub_server-1.5.1.dist-info}/WHEEL +0 -0
- {infrahub_server-1.5.0b2.dist-info → infrahub_server-1.5.1.dist-info}/entry_points.txt +0 -0
infrahub/api/dependencies.py
CHANGED

@@ -1,6 +1,6 @@
 from __future__ import annotations

-from typing import TYPE_CHECKING,
+from typing import TYPE_CHECKING, AsyncGenerator

 from fastapi import Depends, Query, Request
 from fastapi.security import APIKeyHeader, HTTPAuthorizationCredentials, HTTPBearer
@@ -17,8 +17,6 @@ from infrahub.exceptions import AuthorizationError
 from infrahub.permissions import PermissionManager

 if TYPE_CHECKING:
-    from neo4j import AsyncSession
-
     from infrahub.models import RefreshTokenData

 jwt_scheme = HTTPBearer(auto_error=False)
@@ -36,16 +34,9 @@ class BranchParams(BaseModel):
     model_config = ConfigDict(arbitrary_types_allowed=True)


-async def …
-    …
-    …
-        yield session
-    finally:
-        await session.close()
-
-
-async def get_db(request: Request) -> InfrahubDatabase:
-    return request.app.state.db.start_session()
+async def get_db(request: Request) -> AsyncGenerator[InfrahubDatabase, None]:
+    async with request.app.state.db.start_session() as db:
+        yield db


 async def get_access_token(
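The change above turns `get_db` from a function that returned an un-entered session object into a yield-based FastAPI dependency, so the session is opened before the handler runs and closed after the response is sent. A minimal self-contained sketch of the same pattern (the `Session` class here is a hypothetical stand-in for the InfrahubDatabase session):

```python
from collections.abc import AsyncGenerator

from fastapi import Depends, FastAPI

app = FastAPI()


class Session:
    """Hypothetical stand-in for an InfrahubDatabase session."""

    async def __aenter__(self) -> "Session":
        return self  # open the connection/session here

    async def __aexit__(self, *exc: object) -> None:
        pass  # close the session; FastAPI triggers this after the response


async def get_db() -> AsyncGenerator[Session, None]:
    # Yield-based dependency: everything after `yield` (including the
    # context manager's __aexit__) executes once the handler has returned.
    async with Session() as db:
        yield db


@app.get("/items")
async def list_items(db: Session = Depends(get_db)) -> dict[str, bool]:
    return {"ok": True}
```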
infrahub/api/transformation.py
CHANGED

@@ -64,17 +64,18 @@ async def transform_python(
             message="Repository doesn't have a commit",
         )

-    gql_params = await prepare_graphql_params(
-        db=db, branch=branch_params.branch, at=branch_params.at, service=request.app.state.service
-    )
+    async with db.start_session(read_only=True) as dbs:
+        gql_params = await prepare_graphql_params(
+            db=dbs, branch=branch_params.branch, at=branch_params.at, service=request.app.state.service
+        )

-    result = await graphql(
-        schema=gql_params.schema,
-        source=query.query.value,
-        context_value=gql_params.context,
-        root_value=None,
-        variable_values=params,
-    )
+        result = await graphql(
+            schema=gql_params.schema,
+            source=query.query.value,
+            context_value=gql_params.context,
+            root_value=None,
+            variable_values=params,
+        )

     data = extract_data(query_name=query.name.value, result=result)

@@ -128,17 +129,18 @@ async def transform_jinja2(
             message="Repository doesn't have a commit",
         )

-    gql_params = await prepare_graphql_params(
-        db=db, branch=branch_params.branch, at=branch_params.at, service=request.app.state.service
-    )
+    async with db.start_session(read_only=True) as dbs:
+        gql_params = await prepare_graphql_params(
+            db=dbs, branch=branch_params.branch, at=branch_params.at, service=request.app.state.service
+        )

-    result = await graphql(
-        schema=gql_params.schema,
-        source=query.query.value,
-        context_value=gql_params.context,
-        root_value=None,
-        variable_values=params,
-    )
+        result = await graphql(
+            schema=gql_params.schema,
+            source=query.query.value,
+            context_value=gql_params.context,
+            root_value=None,
+            variable_values=params,
+        )

     data = extract_data(query_name=query.name.value, result=result)
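Both endpoints now keep `prepare_graphql_params` and the `graphql()` call inside a single explicit `read_only` session, so any resolver running during execution uses a session that is guaranteed to be released when the block exits. A rough sketch of the scoping pattern, with a hypothetical `Database` class (`read_only=True` mirrors the keyword used in the diff):

```python
from collections.abc import AsyncIterator
from contextlib import asynccontextmanager


class Database:
    """Hypothetical stand-in for InfrahubDatabase."""

    @asynccontextmanager
    async def start_session(self, read_only: bool = False) -> AsyncIterator["Database"]:
        print(f"session opened (read_only={read_only})")
        try:
            yield self  # all awaits inside the `async with` share this session
        finally:
            print("session closed")  # runs even if query execution raises


async def handle_request(db: Database) -> None:
    async with db.start_session(read_only=True) as dbs:
        ...  # prepare GraphQL params and execute the query against `dbs`
```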
infrahub/cli/db.py
CHANGED

@@ -13,7 +13,6 @@ import typer
 import ujson
 from infrahub_sdk.async_typer import AsyncTyper
 from prefect.testing.utilities import prefect_test_harness
-from rich import print as rprint
 from rich.console import Console
 from rich.table import Table

@@ -22,6 +21,7 @@ from infrahub.auth import AccountSession, AuthType
 from infrahub.context import InfrahubContext
 from infrahub.core import registry
 from infrahub.core.branch import Branch
+from infrahub.core.branch.enums import BranchStatus
 from infrahub.core.branch.tasks import rebase_branch
 from infrahub.core.constants import GLOBAL_BRANCH_NAME
 from infrahub.core.graph import GRAPH_VERSION
@@ -40,6 +40,7 @@ from infrahub.core.migrations.exceptions import MigrationFailureError
 from infrahub.core.migrations.graph import get_graph_migrations, get_migration_by_number
 from infrahub.core.migrations.schema.models import SchemaApplyMigrationData
 from infrahub.core.migrations.schema.tasks import schema_apply_migrations
+from infrahub.core.migrations.shared import get_migration_console
 from infrahub.core.schema import SchemaRoot, core_models, internal_schema
 from infrahub.core.schema.definitions.deprecated import deprecated_models
 from infrahub.core.schema.manager import SchemaManager
@@ -284,27 +285,27 @@ async def detect_migration_to_run(
     current_graph_version: int, migration_number: int | str | None = None
 ) -> Sequence[MigrationTypes]:
     """Return a sequence of migrations to apply to upgrade the database."""
-    …
+    get_migration_console().log("Checking current state of the database")
     migrations: list[MigrationTypes] = []

     if migration_number:
         migration = get_migration_by_number(migration_number)
         migrations.append(migration)
         if current_graph_version > migration.minimum_version:
-            …
+            get_migration_console().log(
                 f"Migration {migration_number} already applied. To apply again, run the command without the --check flag."
             )
             return []
-        …
+        get_migration_console().log(
             f"Migration {migration_number} needs to be applied. Run `infrahub db migrate` to apply all outstanding migrations."
         )
     else:
         migrations.extend(await get_graph_migrations(current_graph_version=current_graph_version))
         if not migrations:
-            …
+            get_migration_console().log(f"Database up-to-date (v{current_graph_version}), no migration to execute.")
             return []

-    …
+    get_migration_console().log(
         f"Database needs to be updated (v{current_graph_version} -> v{GRAPH_VERSION}), {len(migrations)} migrations pending"
     )
     return migrations
@@ -337,38 +338,57 @@ async def migrate_database(
         if execution_result.success:
             validation_result = await migration.validate_migration(db=db)
             if validation_result.success:
-                …
+                get_migration_console().log(f"Migration: {migration.name} {SUCCESS_BADGE}")
                 root_node.graph_version = migration.minimum_version + 1
                 await root_node.save(db=db)

         if not execution_result.success or (validation_result and not validation_result.success):
-            …
+            get_migration_console().log(f"Migration: {migration.name} {FAILED_BADGE}")
             for error in execution_result.errors:
-                …
+                get_migration_console().log(f"  {error}")
             if validation_result and not validation_result.success:
                 for error in validation_result.errors:
-                    …
+                    get_migration_console().log(f"  {error}")
             return False

     return True


-async def trigger_rebase_branches(db: InfrahubDatabase) -> None:
-    """Trigger rebase of non-default branches, also triggering migrations in the process."""
+async def mark_branches_needing_rebase(db: InfrahubDatabase) -> list[Branch]:
     branches = [b for b in await Branch.get_list(db=db) if b.name not in [registry.default_branch, GLOBAL_BRANCH_NAME]]
+    if not branches:
+        return []
+
+    branches_needing_rebase: list[Branch] = []
+    for branch in branches:
+        if branch.graph_version == GRAPH_VERSION:
+            continue
+
+        branch.status = BranchStatus.NEED_UPGRADE_REBASE
+        await branch.save(db=db)
+        branches_needing_rebase.append(branch)
+
+    return branches_needing_rebase
+
+
+async def trigger_rebase_branches(db: InfrahubDatabase, branches: Sequence[Branch]) -> None:
+    """Trigger rebase of non-default branches, also triggering migrations in the process."""
     if not branches:
         return

-    …
+    get_migration_console().log(
+        f"Planning rebase and migrations for {len(branches)} {'branches' if len(branches) != 1 else 'branch'}: "
+        f"{', '.join([b.name for b in branches])}"
+    )

     for branch in branches:
         if branch.graph_version == GRAPH_VERSION:
-            …
+            get_migration_console().log(
                 f"Ignoring branch rebase and migrations for '{branch.name}' (ID: {branch.uuid}), it is already up-to-date"
             )
             continue

-        …
+        get_migration_console().print(f"Rebasing branch '{branch.name}' (ID: {branch.uuid})...", end="")
         try:
             await registry.schema.load_schema(db=db, branch=branch)
             await rebase_branch(
@@ -378,9 +398,9 @@ async def trigger_rebase_branches(db: InfrahubDatabase) -> None:
                 ),
                 send_events=False,
             )
-            …
+            get_migration_console().log(SUCCESS_BADGE)
         except (ValidationError, MigrationFailureError):
-            …
+            get_migration_console().log(FAILED_BADGE)


 async def initialize_internal_schema() -> None:
@@ -417,16 +437,16 @@ async def update_core_schema(db: InfrahubDatabase, initialize: bool = True, debu
     branch_schema.validate_node_deletions(diff=schema_diff)
     result = branch_schema.validate_update(other=candidate_schema, diff=schema_diff, enforce_update_support=False)
     if result.errors:
-        …
+        get_migration_console().log(f"{ERROR_BADGE} | Unable to update the schema, due to failed validations")
         for error in result.errors:
-            …
+            get_migration_console().log(error.to_string())
         raise typer.Exit(1)

     if not result.diff.all:
-        …
+        get_migration_console().log("Core Schema Up to date, nothing to update")
         return

-    …
+    get_migration_console().log("Core Schema has diff, will need to be updated")
     if debug:
         result.diff.print()

@@ -441,9 +461,9 @@ async def update_core_schema(db: InfrahubDatabase, initialize: bool = True, debu
     responses = await schema_validate_migrations(message=validate_migration_data)
     error_messages = [violation.message for response in responses for violation in response.violations]
     if error_messages:
-        …
+        get_migration_console().log(f"{ERROR_BADGE} | Unable to update the schema, due to failed validations")
         for message in error_messages:
-            …
+            get_migration_console().log(message)
         raise typer.Exit(1)

     # ----------------------------------------------------------
@@ -466,9 +486,11 @@ async def update_core_schema(db: InfrahubDatabase, initialize: bool = True, debu
             update_db=True,
         )
         default_branch.update_schema_hash()
-        …
+        get_migration_console().log(
+            "The Core Schema has been updated, make sure to rebase any open branches after the upgrade"
+        )
         if debug:
-            …
+            get_migration_console().log(f"New schema hash: {default_branch.active_schema_hash.main}")
         await default_branch.save(db=dbt)

     # ----------------------------------------------------------
@@ -483,9 +505,9 @@ async def update_core_schema(db: InfrahubDatabase, initialize: bool = True, debu
     migration_error_msgs = await schema_apply_migrations(message=apply_migration_data)

     if migration_error_msgs:
-        …
+        get_migration_console().log(f"{ERROR_BADGE} | Some error(s) happened while running the schema migrations")
         for message in migration_error_msgs:
-            …
+            get_migration_console().log(message)
         raise typer.Exit(1)


@@ -609,16 +631,16 @@ RETURN vertices, edges
         edge_csv_writer.writeheader()

         while has_more_data:
-            …
+            get_migration_console().print("Retrieving batch of vertices and edges...", end="")
             results = await db.execute_query(
                 query=query,
                 params={"kinds": kinds, "uuids": uuids, "limit": limit, "offset": offset},
             )
-            …
+            get_migration_console().print("done. ", end="")
             has_more_data = len(results) >= limit
             offset += limit

-            …
+            get_migration_console().print("Writing batch to export files...", end="")
             for result in results:
                 vertices = result.get("vertices")
                 for vertex in vertices:
@@ -649,10 +671,10 @@ RETURN vertices, edges
                         serial_edge[property_name] = value
                     edge_csv_writer.writerow(serial_edge)
                     all_db_ids.add(edge.element_id)
-            …
+            get_migration_console().log("done.")

-    …
-    …
+    get_migration_console().log(f"{SUCCESS_BADGE} Export complete")
+    get_migration_console().log(f"Export directory is here: {export_dir.absolute()}")
     return export_dir


@@ -690,9 +712,9 @@ UNWIND $vertices AS vertex
 CREATE (v:ImportNode:%(node_labels)s {db_id: vertex.db_id})
 SET v = vertex
 """ % {"node_labels": ":".join(vertex_labels)}
-    …
+    get_migration_console().print(f"Loading {len(vertex_dicts)} {vertex_labels} nodes...", end="")
     await db.execute_query(query=vertex_import_query, params={"vertices": vertex_dicts})
-    …
+    get_migration_console().log("done")


 async def load_edges(
@@ -705,17 +727,17 @@ MATCH (b:ImportNode) WHERE b.db_id = toString(edge.end_node_id)
 CREATE (a)-[e:%(edge_type)s]->(b)
 SET e = edge.properties
 """ % {"edge_type": edge_type}
-    …
+    get_migration_console().print(f"Loading {len(edge_dicts)} {edge_type} edges...", end="")
     await db.execute_query(query=edges_import_query, params={"edges": edge_dicts})
-    …
+    get_migration_console().log("done")


 async def load_export(db: InfrahubDatabase, export_dir: Path, query_limit: int = 1000) -> None:
     if not export_dir.exists():
-        …
+        get_migration_console().log(f"{ERROR_BADGE} {export_dir} does not exist")
         raise typer.Exit(1)
     if not export_dir.is_dir():
-        …
+        get_migration_console().log(f"{ERROR_BADGE} {export_dir} is not a directory")
         raise typer.Exit(1)
     vertex_file: Path | None = None
     edge_file: Path | None = None
@@ -726,17 +748,17 @@ async def load_export(db: InfrahubDatabase, export_dir: Path, query_limit: int =
         elif export_file.name == "edges.csv":
             edge_file = export_file
     if not vertex_file or not vertex_file.exists() or not vertex_file.is_file():
-        …
+        get_migration_console().log(f"{ERROR_BADGE} File 'vertices.csv' does not exist in the export directory")
         raise typer.Exit(1)
     if not edge_file or not edge_file.exists() or not edge_file.is_file():
-        …
+        get_migration_console().log(f"{ERROR_BADGE} File 'edges.csv' does not exist in the export directory")
         raise typer.Exit(1)

     # index massively improves time required to load a large export
     create_index_query = "CREATE RANGE INDEX import_node_db_id IF NOT EXISTS FOR (v:ImportNode) ON (v.db_id)"
     await db.execute_query(query=create_index_query)

-    …
+    get_migration_console().log("Loading vertices...")
     vertices_by_labels_map: dict[frozenset[str], list[dict[str, Any]]] = defaultdict(list)
     with vertex_file.open() as file:
         csv_reader = DictReader(file)
@@ -751,9 +773,9 @@ async def load_export(db: InfrahubDatabase, export_dir: Path, query_limit: int =

     for labels, vertex_rows in vertices_by_labels_map.items():
         await load_vertices(db=db, vertex_labels=list(labels), vertex_dicts=vertex_rows)
-    …
+    get_migration_console().log("Vertices loaded")

-    …
+    get_migration_console().log("Loading edges...")
     edges_by_type_map: dict[str, list[dict[str, Any]]] = defaultdict(list)
     with edge_file.open() as file:
         csv_reader = DictReader(file)
@@ -778,8 +800,8 @@ async def load_export(db: InfrahubDatabase, export_dir: Path, query_limit: int =

     for edge_type, edge_dicts in edges_by_type_map.items():
         await load_edges(db=db, edge_type=edge_type, edge_dicts=edge_dicts)
-    …
-    …
+    get_migration_console().log("Edges loaded")
+    get_migration_console().log(f"{SUCCESS_BADGE} Export loaded")


 @app.command(name="check")
@@ -822,10 +844,10 @@ async def run_database_checks(db: InfrahubDatabase, output_dir: Path) -> None:
         db: The database object.
         output_dir: Directory to save detailed check results.
     """
-    …
+    get_migration_console().log("Running database health checks...")

     # Check 1: Duplicate active relationships
-    …
+    get_migration_console().log("\n[bold cyan]Check 1: Duplicate Active Relationships[/bold cyan]")
     duplicate_active_rels_query = """
 MATCH (a:Node)-[e1:IS_RELATED {status: "active"}]-(r:Relationship)-[e2:IS_RELATED {branch: e1.branch, status: "active"}]-(b:Node)
 WHERE a.uuid < b.uuid
@@ -849,7 +871,7 @@ async def run_database_checks(db: InfrahubDatabase, output_dir: Path) -> None:

     results = await db.execute_query(query=duplicate_active_rels_query)
     if results:
-        …
+        get_migration_console().log(f"[red]Found {len(results)} duplicate active relationships[/red]")
         # Write detailed results to file
         output_file = output_dir / "duplicate_active_relationships.csv"
         with output_file.open(mode="w", newline="") as f:
@@ -859,12 +881,12 @@ async def run_database_checks(db: InfrahubDatabase, output_dir: Path) -> None:
             writer.writeheader()
             for result in results:
                 writer.writerow(dict(result))
-        …
+        get_migration_console().log(f"  Detailed results written to: {output_file}")
     else:
-        …
+        get_migration_console().log(f"{SUCCESS_BADGE} No duplicate active relationships found")

     # Check 2: Duplicated relationship nodes
-    …
+    get_migration_console().log("\n[bold cyan]Check 2: Duplicated Relationship Nodes[/bold cyan]")
     duplicate_rel_nodes_query = """
 MATCH (r:Relationship)
 WITH r.uuid AS r_uuid, COUNT(*) AS num_rels
@@ -877,7 +899,7 @@ async def run_database_checks(db: InfrahubDatabase, output_dir: Path) -> None:

     results = await db.execute_query(query=duplicate_rel_nodes_query)
     if results:
-        …
+        get_migration_console().log(f"[red]Found {len(results)} duplicated relationship nodes[/red]")
         # Write detailed results to file
         output_file = output_dir / "duplicated_relationship_nodes.csv"
         with output_file.open(mode="w", newline="") as f:
@@ -885,12 +907,12 @@ async def run_database_checks(db: InfrahubDatabase, output_dir: Path) -> None:
             writer.writeheader()
             for result in results:
                 writer.writerow(dict(result))
-        …
+        get_migration_console().log(f"  Detailed results written to: {output_file}")
     else:
-        …
+        get_migration_console().log(f"{SUCCESS_BADGE} No duplicated relationship nodes found")

     # Check 3: Duplicated edges
-    …
+    get_migration_console().log("\n[bold cyan]Check 3: Duplicated Edges[/bold cyan]")
     duplicate_edges_query = """
 MATCH (a)
 CALL (a) {
@@ -920,7 +942,7 @@ async def run_database_checks(db: InfrahubDatabase, output_dir: Path) -> None:

     results = await db.execute_query(query=duplicate_edges_query)
     if results:
-        …
+        get_migration_console().log(f"[red]Found {len(results)} sets of duplicated edges[/red]")
         # Write detailed results to file
         output_file = output_dir / "duplicated_edges.csv"
         with output_file.open(mode="w", newline="") as f:
@@ -931,12 +953,12 @@ async def run_database_checks(db: InfrahubDatabase, output_dir: Path) -> None:
             writer.writeheader()
             for result in results:
                 writer.writerow(dict(result))
-        …
+        get_migration_console().log(f"  Detailed results written to: {output_file}")
     else:
-        …
+        get_migration_console().log(f"{SUCCESS_BADGE} No duplicated edges found")

     # Check 4: Orphaned Relationships
-    …
+    get_migration_console().log("\n[bold cyan]Check 4: Orphaned Relationships[/bold cyan]")
     orphaned_rels_query = """
 MATCH (r:Relationship)-[:IS_RELATED]-(peer:Node)
 WITH DISTINCT r, peer
@@ -954,7 +976,7 @@ async def run_database_checks(db: InfrahubDatabase, output_dir: Path) -> None:
 """
     results = await db.execute_query(query=orphaned_rels_query)
     if results:
-        …
+        get_migration_console().log(f"[red]Found {len(results)} orphaned Relationships[/red]")
         # Write detailed results to file
         output_file = output_dir / "orphaned_relationships.csv"
         with output_file.open(mode="w", newline="") as f:
@@ -965,9 +987,9 @@ async def run_database_checks(db: InfrahubDatabase, output_dir: Path) -> None:
             writer.writeheader()
             for result in results:
                 writer.writerow(dict(result))
-        …
+        get_migration_console().log(f"  Detailed results written to: {output_file}")
     else:
-        …
+        get_migration_console().log(f"{SUCCESS_BADGE} No orphaned relationships found")

-    …
-    …
+    get_migration_console().log(f"\n{SUCCESS_BADGE} Database health checks completed")
+    get_migration_console().log(f"Detailed results saved to: {output_dir.absolute()}")
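The dominant change in this file swaps every direct `rprint` call (the removed `from rich import print as rprint`) for `get_migration_console()`, a new helper added in `infrahub/core/migrations/shared.py` (+14 lines, body not shown in this diff). A plausible sketch of such an accessor, assuming it lazily builds one shared `rich.Console` — which would be consistent with the `.log(...)` and `.print(..., end="")` calls above:

```python
from rich.console import Console

# Module-level cache; the real implementation in shared.py is not shown here.
_migration_console: Console | None = None


def get_migration_console() -> Console:
    """Return the process-wide console used for migration/CLI output."""
    global _migration_console
    if _migration_console is None:
        _migration_console = Console()
    return _migration_console
```

Funneling all migration output through one accessor lets it be redirected or silenced in a single place (for example in tests), which scattered `rich.print` calls do not allow.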
infrahub/cli/upgrade.py
CHANGED

@@ -8,7 +8,6 @@ import typer
 from deepdiff import DeepDiff
 from infrahub_sdk.async_typer import AsyncTyper
 from prefect.client.orchestration import get_client
-from rich import print as rprint

 from infrahub import config
 from infrahub.core.initialization import (
@@ -18,6 +17,7 @@ from infrahub.core.initialization import (
     initialize_registry,
 )
 from infrahub.core.manager import NodeManager
+from infrahub.core.migrations.shared import get_migration_console
 from infrahub.core.protocols import CoreAccount, CoreObjectPermission
 from infrahub.dependencies.registry import build_component_registry
 from infrahub.lock import initialize_lock
@@ -35,6 +35,7 @@ from infrahub.workflows.initialization import (
 from .db import (
     detect_migration_to_run,
     initialize_internal_schema,
+    mark_branches_needing_rebase,
     migrate_database,
     trigger_rebase_branches,
     update_core_schema,
@@ -42,9 +43,11 @@ from .db import (

 if TYPE_CHECKING:
     from infrahub.cli.context import CliContext
+    from infrahub.core.branch.models import Branch
     from infrahub.database import InfrahubDatabase

 app = AsyncTyper()
+console = get_migration_console()


 @app.command(name="upgrade")
@@ -53,6 +56,9 @@ async def upgrade_cmd(
     config_file: str = typer.Argument("infrahub.toml", envvar="INFRAHUB_CONFIG"),
     check: bool = typer.Option(False, help="Check the state of the system without upgrading."),
     rebase_branches: bool = typer.Option(False, help="Rebase and apply migrations to branches if required."),
+    interactive: bool = typer.Option(
+        False, help="Use interactive prompt to accept or deny rebase of individual branches."
+    ),
 ) -> None:
     """Upgrade Infrahub to the latest version."""

@@ -90,7 +96,7 @@ async def upgrade_cmd(

     if not await migrate_database(db=dbdriver, initialize=False, migrations=migrations):
         # A migration failed, stop the upgrade process
-        …
+        console.log("Upgrade cancelled due to migration failure.")
         await dbdriver.close()
         return

@@ -115,8 +121,22 @@ async def upgrade_cmd(
     # -------------------------------------------
     # Perform branch rebase and apply migrations to them
     # -------------------------------------------
+    branches = await mark_branches_needing_rebase(db=dbdriver)
+    plural = len(branches) != 1
+    get_migration_console().log(
+        f"Found {len(branches)} {'branches' if plural else 'branch'} that {'need' if plural else 'needs'} to be rebased"
+    )
+
     if rebase_branches:
-        …
+        branches_to_rebase: list[Branch] = []
+        if not interactive:
+            branches_to_rebase = branches
+        else:
+            for branch in branches:
+                if typer.confirm(f"Rebase branch {branch.name}?"):
+                    branches_to_rebase.append(branch)
+
+        await trigger_rebase_branches(db=dbdriver, branches=branches_to_rebase)

     await dbdriver.close()

@@ -134,21 +154,21 @@ async def upgrade_menu(db: InfrahubDatabase) -> None:
     diff_menu = DeepDiff(menu_items.to_rest(), default_menu_dict.to_rest(), ignore_order=True)

     if not diff_menu:
-        …
+        console.log("Menu Up to date, nothing to update")
         return

     await menu_repository.update_menu(existing_menu=menu_items, new_menu=default_menu_dict, menu_nodes=menu_nodes)
-    …
+    console.log("Menu has been updated")


 async def upgrade_permissions(db: InfrahubDatabase) -> None:
     existing_permissions = await NodeManager.query(schema=CoreObjectPermission, db=db, limit=1)
     if existing_permissions:
-        …
+        console.log("Permissions Up to date, nothing to update")
         return

     await setup_permissions(db=db)
-    …
+    console.log("Permissions have been updated")


 async def setup_permissions(db: InfrahubDatabase) -> None:
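The new `--interactive` option gates each branch rebase behind a `typer.confirm` prompt instead of rebasing everything at once. A small standalone sketch of the same accept-or-deny loop (the command name and branch names are made up; Typer derives the `--interactive` flag from the parameter name by default):

```python
import typer

app = typer.Typer()


@app.command()
def rebase(interactive: bool = typer.Option(False, help="Confirm each branch individually.")) -> None:
    branches = ["feature-a", "feature-b"]  # illustrative placeholder data
    if interactive:
        # Ask per branch; only confirmed branches are kept
        selected = [b for b in branches if typer.confirm(f"Rebase branch {b}?")]
    else:
        selected = branches
    typer.echo(f"Rebasing: {', '.join(selected) if selected else 'nothing'}")


if __name__ == "__main__":
    app()
```

Splitting the old rebase routine into `mark_branches_needing_rebase` (which flags branches with `BranchStatus.NEED_UPGRADE_REBASE`) and `trigger_rebase_branches` (which acts on an explicit list) is what makes this selective, prompt-driven flow possible.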
infrahub/core/diff/calculator.py
CHANGED

@@ -141,8 +141,8 @@ class DiffCalculator:
             to_time=to_time,
             previous_node_field_specifiers=previous_node_specifiers,
         )
-        node_limit = int(config.SETTINGS.database.query_size_limit / 10)
-        fields_limit = int(config.SETTINGS.database.query_size_limit / 3)
+        node_limit = max(int(config.SETTINGS.database.query_size_limit / 10), 1)
+        fields_limit = max(int(config.SETTINGS.database.query_size_limit / 3), 1)
         properties_limit = config.SETTINGS.database.query_size_limit

         calculation_request = DiffCalculationRequest(
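The `max(..., 1)` guard protects against a small configured `query_size_limit`: integer division truncates toward zero, so a limit below 10 would previously have produced a node limit of 0. A quick worked example of the arithmetic (the setting value is hypothetical):

```python
query_size_limit = 5  # hypothetical, unusually small setting

unguarded_node_limit = int(query_size_limit / 10)   # 0 -> zero-size pages
guarded_node_limit = max(int(query_size_limit / 10), 1)   # 1
guarded_fields_limit = max(int(query_size_limit / 3), 1)  # 1

print(unguarded_node_limit, guarded_node_limit, guarded_fields_limit)  # -> 0 1 1
```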
infrahub/core/diff/query/delete_query.py
CHANGED

@@ -20,10 +20,14 @@ class EnrichedDiffDeleteQuery(Query):
         diff_filter = "WHERE d_root.uuid IN $diff_root_uuids"

         query = """
-…
-…
-…
-…
-…
+MATCH (d_root:DiffRoot)
+%(diff_filter)s
+OPTIONAL MATCH (d_root)-[*]->(diff_thing)
+WITH DISTINCT d_root, diff_thing
+ORDER BY elementId(diff_thing)
+CALL (diff_thing) {
+    DETACH DELETE diff_thing
+} IN TRANSACTIONS
+DETACH DELETE d_root
 """ % {"diff_filter": diff_filter}
         self.add_to_query(query=query)
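The rewritten query deletes the contents of a diff in batches: `CALL { ... } IN TRANSACTIONS` commits the `DETACH DELETE` work in chunks rather than one potentially huge transaction, which matters when a diff root fans out to many nodes. In Neo4j, `CALL ... IN TRANSACTIONS` must run in an implicit (auto-commit) transaction, which with the official Python driver means `Session.run` rather than a transaction function. A hypothetical standalone sketch of running such a query (connection details and the UUID are placeholders):

```python
from neo4j import GraphDatabase

QUERY = """
MATCH (d_root:DiffRoot) WHERE d_root.uuid IN $diff_root_uuids
OPTIONAL MATCH (d_root)-[*]->(diff_thing)
WITH DISTINCT d_root, diff_thing
ORDER BY elementId(diff_thing)
CALL (diff_thing) {
    DETACH DELETE diff_thing
} IN TRANSACTIONS
DETACH DELETE d_root
"""

with GraphDatabase.driver("neo4j://localhost:7687", auth=("neo4j", "password")) as driver:
    with driver.session() as session:
        # Session.run uses an implicit transaction, as CALL ... IN TRANSACTIONS requires
        session.run(QUERY, diff_root_uuids=["00000000-0000-0000-0000-000000000000"]).consume()
```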