infrahub-server 1.5.0b1__py3-none-any.whl → 1.5.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- infrahub/api/dependencies.py +4 -13
- infrahub/api/internal.py +2 -0
- infrahub/api/oauth2.py +13 -19
- infrahub/api/oidc.py +15 -21
- infrahub/api/schema.py +24 -3
- infrahub/api/transformation.py +22 -20
- infrahub/artifacts/models.py +2 -1
- infrahub/auth.py +137 -3
- infrahub/cli/__init__.py +2 -0
- infrahub/cli/db.py +158 -155
- infrahub/cli/dev.py +118 -0
- infrahub/cli/tasks.py +46 -0
- infrahub/cli/upgrade.py +56 -9
- infrahub/computed_attribute/tasks.py +20 -8
- infrahub/core/attribute.py +10 -2
- infrahub/core/branch/enums.py +1 -1
- infrahub/core/branch/models.py +7 -3
- infrahub/core/branch/tasks.py +68 -7
- infrahub/core/constants/__init__.py +3 -0
- infrahub/core/diff/calculator.py +2 -2
- infrahub/core/diff/query/artifact.py +1 -0
- infrahub/core/diff/query/delete_query.py +9 -5
- infrahub/core/diff/query/field_summary.py +1 -0
- infrahub/core/diff/query/merge.py +39 -23
- infrahub/core/graph/__init__.py +1 -1
- infrahub/core/initialization.py +5 -2
- infrahub/core/migrations/__init__.py +3 -0
- infrahub/core/migrations/exceptions.py +4 -0
- infrahub/core/migrations/graph/__init__.py +12 -13
- infrahub/core/migrations/graph/load_schema_branch.py +21 -0
- infrahub/core/migrations/graph/m013_convert_git_password_credential.py +1 -1
- infrahub/core/migrations/graph/m037_index_attr_vals.py +11 -30
- infrahub/core/migrations/graph/m039_ipam_reconcile.py +9 -7
- infrahub/core/migrations/graph/m040_duplicated_attributes.py +81 -0
- infrahub/core/migrations/graph/m041_deleted_dup_edges.py +149 -0
- infrahub/core/migrations/graph/m042_profile_attrs_in_db.py +147 -0
- infrahub/core/migrations/graph/m043_create_hfid_display_label_in_db.py +164 -0
- infrahub/core/migrations/graph/m044_backfill_hfid_display_label_in_db.py +864 -0
- infrahub/core/migrations/query/__init__.py +7 -8
- infrahub/core/migrations/query/attribute_add.py +8 -6
- infrahub/core/migrations/query/attribute_remove.py +134 -0
- infrahub/core/migrations/runner.py +54 -0
- infrahub/core/migrations/schema/attribute_kind_update.py +9 -3
- infrahub/core/migrations/schema/attribute_supports_profile.py +90 -0
- infrahub/core/migrations/schema/node_attribute_add.py +30 -2
- infrahub/core/migrations/schema/node_attribute_remove.py +13 -109
- infrahub/core/migrations/schema/node_kind_update.py +2 -1
- infrahub/core/migrations/schema/node_remove.py +2 -1
- infrahub/core/migrations/schema/placeholder_dummy.py +3 -2
- infrahub/core/migrations/shared.py +62 -14
- infrahub/core/models.py +2 -2
- infrahub/core/node/__init__.py +42 -12
- infrahub/core/node/create.py +46 -63
- infrahub/core/node/lock_utils.py +70 -44
- infrahub/core/node/resource_manager/ip_address_pool.py +2 -1
- infrahub/core/node/resource_manager/ip_prefix_pool.py +2 -1
- infrahub/core/node/resource_manager/number_pool.py +2 -1
- infrahub/core/query/attribute.py +55 -0
- infrahub/core/query/diff.py +61 -16
- infrahub/core/query/ipam.py +16 -4
- infrahub/core/query/node.py +51 -43
- infrahub/core/query/relationship.py +1 -0
- infrahub/core/relationship/model.py +10 -5
- infrahub/core/schema/__init__.py +56 -0
- infrahub/core/schema/attribute_schema.py +4 -0
- infrahub/core/schema/definitions/core/check.py +1 -1
- infrahub/core/schema/definitions/core/transform.py +1 -1
- infrahub/core/schema/definitions/internal.py +2 -2
- infrahub/core/schema/generated/attribute_schema.py +2 -2
- infrahub/core/schema/manager.py +22 -1
- infrahub/core/schema/schema_branch.py +180 -22
- infrahub/core/schema/schema_branch_display.py +12 -0
- infrahub/core/schema/schema_branch_hfid.py +6 -0
- infrahub/core/validators/uniqueness/checker.py +2 -1
- infrahub/database/__init__.py +0 -13
- infrahub/database/graph.py +21 -0
- infrahub/display_labels/tasks.py +13 -7
- infrahub/events/branch_action.py +27 -1
- infrahub/generators/tasks.py +3 -7
- infrahub/git/base.py +4 -1
- infrahub/git/integrator.py +1 -1
- infrahub/git/models.py +2 -1
- infrahub/git/repository.py +22 -5
- infrahub/git/tasks.py +66 -10
- infrahub/git/utils.py +123 -1
- infrahub/graphql/analyzer.py +9 -0
- infrahub/graphql/api/endpoints.py +14 -4
- infrahub/graphql/manager.py +4 -9
- infrahub/graphql/mutations/branch.py +5 -0
- infrahub/graphql/mutations/convert_object_type.py +11 -1
- infrahub/graphql/mutations/display_label.py +17 -10
- infrahub/graphql/mutations/hfid.py +17 -10
- infrahub/graphql/mutations/ipam.py +54 -35
- infrahub/graphql/mutations/main.py +27 -28
- infrahub/graphql/mutations/proposed_change.py +6 -0
- infrahub/graphql/schema_sort.py +170 -0
- infrahub/graphql/types/branch.py +4 -1
- infrahub/graphql/types/enums.py +3 -0
- infrahub/hfid/tasks.py +13 -7
- infrahub/lock.py +52 -12
- infrahub/message_bus/types.py +3 -1
- infrahub/permissions/constants.py +2 -0
- infrahub/profiles/queries/get_profile_data.py +4 -5
- infrahub/proposed_change/tasks.py +66 -23
- infrahub/server.py +6 -2
- infrahub/services/__init__.py +2 -2
- infrahub/services/adapters/http/__init__.py +5 -0
- infrahub/services/adapters/workflow/worker.py +14 -3
- infrahub/task_manager/event.py +5 -0
- infrahub/task_manager/models.py +7 -0
- infrahub/task_manager/task.py +73 -0
- infrahub/trigger/setup.py +13 -4
- infrahub/trigger/tasks.py +3 -0
- infrahub/workers/dependencies.py +10 -1
- infrahub/workers/infrahub_async.py +10 -2
- infrahub/workflows/catalogue.py +8 -0
- infrahub/workflows/initialization.py +5 -0
- infrahub/workflows/utils.py +2 -1
- infrahub_sdk/analyzer.py +1 -1
- infrahub_sdk/batch.py +2 -2
- infrahub_sdk/branch.py +14 -2
- infrahub_sdk/checks.py +1 -1
- infrahub_sdk/client.py +15 -14
- infrahub_sdk/config.py +29 -2
- infrahub_sdk/ctl/branch.py +3 -0
- infrahub_sdk/ctl/cli_commands.py +2 -0
- infrahub_sdk/ctl/exceptions.py +1 -1
- infrahub_sdk/ctl/schema.py +22 -7
- infrahub_sdk/ctl/task.py +110 -0
- infrahub_sdk/exceptions.py +18 -18
- infrahub_sdk/graphql/query.py +2 -2
- infrahub_sdk/node/attribute.py +1 -1
- infrahub_sdk/node/property.py +1 -1
- infrahub_sdk/node/related_node.py +3 -3
- infrahub_sdk/node/relationship.py +4 -6
- infrahub_sdk/object_store.py +2 -2
- infrahub_sdk/operation.py +1 -1
- infrahub_sdk/protocols_generator/generator.py +1 -1
- infrahub_sdk/pytest_plugin/exceptions.py +9 -9
- infrahub_sdk/pytest_plugin/items/base.py +1 -1
- infrahub_sdk/pytest_plugin/items/check.py +1 -1
- infrahub_sdk/pytest_plugin/items/python_transform.py +1 -1
- infrahub_sdk/repository.py +1 -1
- infrahub_sdk/schema/__init__.py +33 -5
- infrahub_sdk/spec/models.py +7 -0
- infrahub_sdk/spec/object.py +41 -102
- infrahub_sdk/spec/processors/__init__.py +0 -0
- infrahub_sdk/spec/processors/data_processor.py +10 -0
- infrahub_sdk/spec/processors/factory.py +34 -0
- infrahub_sdk/spec/processors/range_expand_processor.py +56 -0
- infrahub_sdk/task/exceptions.py +4 -4
- infrahub_sdk/task/manager.py +2 -2
- infrahub_sdk/task/models.py +6 -4
- infrahub_sdk/timestamp.py +1 -1
- infrahub_sdk/transfer/exporter/json.py +1 -1
- infrahub_sdk/transfer/importer/json.py +1 -1
- infrahub_sdk/transforms.py +1 -1
- {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/METADATA +4 -2
- {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/RECORD +168 -152
- infrahub_testcontainers/container.py +144 -6
- infrahub_testcontainers/docker-compose-cluster.test.yml +5 -0
- infrahub_testcontainers/docker-compose.test.yml +5 -0
- infrahub_testcontainers/helpers.py +19 -4
- infrahub_testcontainers/models.py +8 -6
- infrahub_testcontainers/performance_test.py +6 -4
- infrahub/core/migrations/graph/m040_profile_attrs_in_db.py +0 -166
- infrahub/core/migrations/graph/m041_create_hfid_display_label_in_db.py +0 -97
- infrahub/core/migrations/graph/m042_backfill_hfid_display_label_in_db.py +0 -86
- {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/LICENSE.txt +0 -0
- {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/WHEEL +0 -0
- {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/entry_points.txt +0 -0
infrahub/cli/db.py
CHANGED
@@ -1,6 +1,5 @@
 from __future__ import annotations
 
-import importlib
 import logging
 import os
 from collections import defaultdict
@@ -14,13 +13,17 @@ import typer
 import ujson
 from infrahub_sdk.async_typer import AsyncTyper
 from prefect.testing.utilities import prefect_test_harness
-from rich import print as rprint
 from rich.console import Console
-from rich.logging import RichHandler
 from rich.table import Table
 
 from infrahub import config
+from infrahub.auth import AccountSession, AuthType
+from infrahub.context import InfrahubContext
 from infrahub.core import registry
+from infrahub.core.branch import Branch
+from infrahub.core.branch.enums import BranchStatus
+from infrahub.core.branch.tasks import rebase_branch
+from infrahub.core.constants import GLOBAL_BRANCH_NAME
 from infrahub.core.graph import GRAPH_VERSION
 from infrahub.core.graph.constraints import ConstraintManagerBase, ConstraintManagerMemgraph, ConstraintManagerNeo4j
 from infrahub.core.graph.index import node_indexes, rel_indexes
@@ -32,25 +35,21 @@ from infrahub.core.graph.schema import (
     GraphRelationshipIsPartOf,
     GraphRelationshipProperties,
 )
-from infrahub.core.initialization import (
-    first_time_initialization,
-    get_root_node,
-    initialization,
-    initialize_registry,
-)
+from infrahub.core.initialization import get_root_node, initialize_registry
+from infrahub.core.migrations.exceptions import MigrationFailureError
 from infrahub.core.migrations.graph import get_graph_migrations, get_migration_by_number
 from infrahub.core.migrations.schema.models import SchemaApplyMigrationData
 from infrahub.core.migrations.schema.tasks import schema_apply_migrations
+from infrahub.core.migrations.shared import get_migration_console
 from infrahub.core.schema import SchemaRoot, core_models, internal_schema
 from infrahub.core.schema.definitions.deprecated import deprecated_models
 from infrahub.core.schema.manager import SchemaManager
-from infrahub.core.utils import delete_all_nodes
 from infrahub.core.validators.models.validate_migration import SchemaValidateMigrationData
 from infrahub.core.validators.tasks import schema_validate_migrations
 from infrahub.database import DatabaseType
 from infrahub.database.memgraph import IndexManagerMemgraph
 from infrahub.database.neo4j import IndexManagerNeo4j
-from infrahub.log import get_logger
+from infrahub.exceptions import ValidationError
 
 from .constants import ERROR_BADGE, FAILED_BADGE, SUCCESS_BADGE
 from .db_commands.check_inheritance import check_inheritance
@@ -65,7 +64,7 @@ def get_timestamp_string() -> str:
 
 if TYPE_CHECKING:
     from infrahub.cli.context import CliContext
-    from infrahub.core.migrations.shared import ArbitraryMigration, GraphMigration, InternalSchemaMigration
+    from infrahub.core.migrations.shared import MigrationTypes
     from infrahub.database import InfrahubDatabase
     from infrahub.database.index import IndexManagerBase
 
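Everything below this point swaps the module's direct rprint calls for get_migration_console(), imported above from infrahub.core.migrations.shared (+62 -14 in the file list). That module's implementation is not part of this diff; a minimal sketch of the accessor pattern the call sites imply, assuming it wraps one shared rich Console:

# Minimal sketch only: the real get_migration_console() lives in
# infrahub/core/migrations/shared.py, which this diff does not show.
from rich.console import Console

_MIGRATION_CONSOLE: Console | None = None


def get_migration_console() -> Console:
    # One lazily created Console so migrations, branch rebases, and schema
    # updates all write through the same output channel.
    global _MIGRATION_CONSOLE
    if _MIGRATION_CONSOLE is None:
        _MIGRATION_CONSOLE = Console()
    return _MIGRATION_CONSOLE

This also explains the two call styles used below: Console.log stamps each line with a timestamp, while Console.print accepts end="" for partial-line progress output.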
@@ -94,67 +93,6 @@ def callback() -> None:
     """
 
 
-@app.command()
-async def init(
-    ctx: typer.Context,
-    config_file: str = typer.Option(
-        "infrahub.toml", envvar="INFRAHUB_CONFIG", help="Location of the configuration file to use for Infrahub"
-    ),
-) -> None:
-    """Erase the content of the database and initialize it with the core schema."""
-
-    log = get_logger()
-
-    # --------------------------------------------------
-    # CLEANUP
-    # - For now we delete everything in the database
-    # TODO, if possible try to implement this in an idempotent way
-    # --------------------------------------------------
-
-    logging.getLogger("neo4j").setLevel(logging.ERROR)
-    config.load_and_exit(config_file_name=config_file)
-
-    context: CliContext = ctx.obj
-    dbdriver = await context.init_db(retry=1)
-    async with dbdriver.start_transaction() as db:
-        log.info("Delete All Nodes")
-        await delete_all_nodes(db=db)
-        await first_time_initialization(db=db)
-
-    await dbdriver.close()
-
-
-@app.command()
-async def load_test_data(
-    ctx: typer.Context,
-    config_file: str = typer.Option(
-        "infrahub.toml", envvar="INFRAHUB_CONFIG", help="Location of the configuration file to use for Infrahub"
-    ),
-    dataset: str = "dataset01",
-) -> None:
-    """Load test data into the database from the `test_data` directory."""
-
-    logging.getLogger("neo4j").setLevel(logging.ERROR)
-    config.load_and_exit(config_file_name=config_file)
-
-    context: CliContext = ctx.obj
-    dbdriver = await context.init_db(retry=1)
-
-    async with dbdriver.start_session() as db:
-        await initialization(db=db)
-
-        log_level = "DEBUG"
-
-        FORMAT = "%(message)s"
-        logging.basicConfig(level=log_level, format=FORMAT, datefmt="[%X]", handlers=[RichHandler()])
-        logging.getLogger("infrahub")
-
-        dataset_module = importlib.import_module(f"infrahub.test_data.{dataset}")
-        await dataset_module.load_data(db=db)
-
-    await dbdriver.close()
-
-
 @app.command(name="migrate")
 async def migrate_cmd(
     ctx: typer.Context,
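Both commands removed here are development helpers. The file list above adds a new infrahub/cli/dev.py (+118 -0) and touches infrahub/cli/__init__.py (+2 -0), which is consistent with the commands moving to a dedicated dev command group rather than being dropped outright. A hypothetical sketch of that kind of relocation (dev.py's contents are not shown in this diff, so all names below are illustrative):

# Hypothetical wiring only; not the actual Infrahub dev CLI.
import typer

dev_app = typer.Typer(help="Development-only database commands")


@dev_app.command()
def init(config_file: str = "infrahub.toml") -> None:
    """Erase the database and re-load the core schema (destructive)."""
    ...


# A two-line change in infrahub/cli/__init__.py would be enough to mount it:
# from .dev import dev_app
# app.add_typer(dev_app, name="dev")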
@@ -172,7 +110,15 @@ async def migrate_cmd(
     context: CliContext = ctx.obj
     dbdriver = await context.init_db(retry=1)
 
-    await migrate_database(db=dbdriver, initialize=True, migration_number=migration_number, check=check)
+    root_node = await get_root_node(db=dbdriver)
+    migrations = await detect_migration_to_run(
+        current_graph_version=root_node.graph_version, migration_number=migration_number
+    )
+
+    if check or not migrations:
+        return
+
+    await migrate_database(db=dbdriver, migrations=migrations, initialize=True)
 
     await dbdriver.close()
@@ -335,8 +281,38 @@ async def index(
     await dbdriver.close()
 
 
+async def detect_migration_to_run(
+    current_graph_version: int, migration_number: int | str | None = None
+) -> Sequence[MigrationTypes]:
+    """Return a sequence of migrations to apply to upgrade the database."""
+    get_migration_console().log("Checking current state of the database")
+    migrations: list[MigrationTypes] = []
+
+    if migration_number:
+        migration = get_migration_by_number(migration_number)
+        migrations.append(migration)
+        if current_graph_version > migration.minimum_version:
+            get_migration_console().log(
+                f"Migration {migration_number} already applied. To apply again, run the command without the --check flag."
+            )
+            return []
+        get_migration_console().log(
+            f"Migration {migration_number} needs to be applied. Run `infrahub db migrate` to apply all outstanding migrations."
+        )
+    else:
+        migrations.extend(await get_graph_migrations(current_graph_version=current_graph_version))
+        if not migrations:
+            get_migration_console().log(f"Database up-to-date (v{current_graph_version}), no migration to execute.")
+            return []
+
+        get_migration_console().log(
+            f"Database needs to be updated (v{current_graph_version} -> v{GRAPH_VERSION}), {len(migrations)} migrations pending"
+        )
+    return migrations
+
+
 async def migrate_database(
-    db: InfrahubDatabase, initialize: bool = False, migration_number: int | str | None = None, check: bool = False
+    db: InfrahubDatabase, migrations: Sequence[MigrationTypes], initialize: bool = False
 ) -> bool:
     """Apply the latest migrations to the database, this function will print the status directly in the console.
 
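The already-applied test in detect_migration_to_run (current_graph_version > migration.minimum_version) pairs with the bookkeeping in migrate_database below, where a successful run stores minimum_version + 1 as the new graph version. A worked example of that arithmetic, assuming plain integer versions:

# Illustration of the version check; FakeMigration is a stand-in, not an
# actual Infrahub migration class.
class FakeMigration:
    minimum_version = 43  # graph version this migration upgrades from

# After a successful run, migrate_database records minimum_version + 1:
graph_version = FakeMigration.minimum_version + 1  # 44

# detect_migration_to_run now classifies the migration as already applied:
assert graph_version > FakeMigration.minimum_version

# A database still at v43 keeps the migration in the pending list:
assert not (43 > FakeMigration.minimum_version)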
@@ -344,40 +320,16 @@ async def migrate_database(
 
     Args:
         db: The database object.
-        migration_number:
-        check:
+        migrations: Sequence of migrations to apply.
+        initialize: Whether to initialize the registry before running migrations.
     """
-
+    if not migrations:
+        return True
 
     if initialize:
         await initialize_registry(db=db)
 
     root_node = await get_root_node(db=db)
-    if migration_number:
-        migration = get_migration_by_number(migration_number)
-        migrations: Sequence[GraphMigration | InternalSchemaMigration | ArbitraryMigration] = [migration]
-        if check:
-            if root_node.graph_version > migration.minimum_version:
-                rprint(
-                    f"Migration {migration_number} already applied. To apply again, run the command without the --check flag."
-                )
-                return True
-            rprint(
-                f"Migration {migration_number} needs to be applied. Run `infrahub db migrate` to apply all outstanding migrations."
-            )
-            return False
-    else:
-        migrations = await get_graph_migrations(root=root_node)
-        if not migrations:
-            rprint(f"Database up-to-date (v{root_node.graph_version}), no migration to execute.")
-            return True
-
-        rprint(
-            f"Database needs to be updated (v{root_node.graph_version} -> v{GRAPH_VERSION}), {len(migrations)} migrations pending"
-        )
-
-        if check:
-            return True
 
     for migration in migrations:
         execution_result = await migration.execute(db=db)
@@ -386,22 +338,71 @@ async def migrate_database(
         if execution_result.success:
             validation_result = await migration.validate_migration(db=db)
             if validation_result.success:
-                rprint(f"Migration: {migration.name} {SUCCESS_BADGE}")
+                get_migration_console().log(f"Migration: {migration.name} {SUCCESS_BADGE}")
                 root_node.graph_version = migration.minimum_version + 1
                 await root_node.save(db=db)
 
         if not execution_result.success or (validation_result and not validation_result.success):
-            rprint(f"Migration: {migration.name} {FAILED_BADGE}")
+            get_migration_console().log(f"Migration: {migration.name} {FAILED_BADGE}")
             for error in execution_result.errors:
-                rprint(f"  {error}")
+                get_migration_console().log(f"  {error}")
             if validation_result and not validation_result.success:
                 for error in validation_result.errors:
-                    rprint(f"  {error}")
+                    get_migration_console().log(f"  {error}")
             return False
 
     return True
 
 
+async def mark_branches_needing_rebase(db: InfrahubDatabase) -> list[Branch]:
+    branches = [b for b in await Branch.get_list(db=db) if b.name not in [registry.default_branch, GLOBAL_BRANCH_NAME]]
+    if not branches:
+        return []
+
+    branches_needing_rebase: list[Branch] = []
+    for branch in branches:
+        if branch.graph_version == GRAPH_VERSION:
+            continue
+
+        branch.status = BranchStatus.NEED_UPGRADE_REBASE
+        await branch.save(db=db)
+        branches_needing_rebase.append(branch)
+
+    return branches_needing_rebase
+
+
+async def trigger_rebase_branches(db: InfrahubDatabase, branches: Sequence[Branch]) -> None:
+    """Trigger rebase of non-default branches, also triggering migrations in the process."""
+    if not branches:
+        return
+
+    get_migration_console().log(
+        f"Planning rebase and migrations for {len(branches)} {'branches' if len(branches) != 1 else 'branch'}: "
+        f"{', '.join([b.name for b in branches])}"
+    )
+
+    for branch in branches:
+        if branch.graph_version == GRAPH_VERSION:
+            get_migration_console().log(
+                f"Ignoring branch rebase and migrations for '{branch.name}' (ID: {branch.uuid}), it is already up-to-date"
+            )
+            continue
+
+        get_migration_console().print(f"Rebasing branch '{branch.name}' (ID: {branch.uuid})...", end="")
+        try:
+            await registry.schema.load_schema(db=db, branch=branch)
+            await rebase_branch(
+                branch=branch.name,
+                context=InfrahubContext.init(
+                    branch=branch, account=AccountSession(auth_type=AuthType.NONE, authenticated=False, account_id="")
+                ),
+                send_events=False,
+            )
+            get_migration_console().log(SUCCESS_BADGE)
+        except (ValidationError, MigrationFailureError):
+            get_migration_console().log(FAILED_BADGE)
+
+
 async def initialize_internal_schema() -> None:
     registry.schema = SchemaManager()
     schema = SchemaRoot(**internal_schema)
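mark_branches_needing_rebase flags every non-default branch whose stored graph_version trails GRAPH_VERSION, and trigger_rebase_branches then rebases the flagged branches, running their migrations as a side effect. Together with the expanded infrahub/cli/upgrade.py (+56 -9) in the file list, this suggests an upgrade flow along the following lines; the composition below is a sketch, not the actual upgrade command:

# Sketch of how the two new helpers compose, assuming an open
# InfrahubDatabase handle; the real caller is outside this diff.
async def rebase_outdated_branches(db: InfrahubDatabase) -> None:
    stale_branches = await mark_branches_needing_rebase(db=db)  # flag and persist status
    if not stale_branches:
        get_migration_console().log("All branches already at the current graph version")
        return
    await trigger_rebase_branches(db=db, branches=stale_branches)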
@@ -436,16 +437,16 @@ async def update_core_schema(db: InfrahubDatabase, initialize: bool = True, debu
     branch_schema.validate_node_deletions(diff=schema_diff)
     result = branch_schema.validate_update(other=candidate_schema, diff=schema_diff, enforce_update_support=False)
     if result.errors:
-        rprint(f"{ERROR_BADGE} | Unable to update the schema, due to failed validations")
+        get_migration_console().log(f"{ERROR_BADGE} | Unable to update the schema, due to failed validations")
         for error in result.errors:
-            rprint(error.to_string())
+            get_migration_console().log(error.to_string())
         raise typer.Exit(1)
 
     if not result.diff.all:
-        rprint("Core Schema Up to date, nothing to update")
+        get_migration_console().log("Core Schema Up to date, nothing to update")
         return
 
-    rprint("Core Schema has diff, will need to be updated")
+    get_migration_console().log("Core Schema has diff, will need to be updated")
     if debug:
         result.diff.print()
 
@@ -460,9 +461,9 @@ async def update_core_schema(db: InfrahubDatabase, initialize: bool = True, debu
     responses = await schema_validate_migrations(message=validate_migration_data)
     error_messages = [violation.message for response in responses for violation in response.violations]
     if error_messages:
-        rprint(f"{ERROR_BADGE} | Unable to update the schema, due to failed validations")
+        get_migration_console().log(f"{ERROR_BADGE} | Unable to update the schema, due to failed validations")
         for message in error_messages:
-            rprint(message)
+            get_migration_console().log(message)
         raise typer.Exit(1)
 
     # ----------------------------------------------------------
@@ -485,9 +486,11 @@ async def update_core_schema(db: InfrahubDatabase, initialize: bool = True, debu
             update_db=True,
         )
         default_branch.update_schema_hash()
-        rprint("The Core Schema has been updated, make sure to rebase any open branches after the upgrade")
+        get_migration_console().log(
+            "The Core Schema has been updated, make sure to rebase any open branches after the upgrade"
+        )
         if debug:
-            rprint(f"New schema hash: {default_branch.active_schema_hash.main}")
+            get_migration_console().log(f"New schema hash: {default_branch.active_schema_hash.main}")
         await default_branch.save(db=dbt)
 
     # ----------------------------------------------------------
@@ -502,9 +505,9 @@ async def update_core_schema(db: InfrahubDatabase, initialize: bool = True, debu
     migration_error_msgs = await schema_apply_migrations(message=apply_migration_data)
 
     if migration_error_msgs:
-        rprint(f"{ERROR_BADGE} | Some error(s) happened while running the schema migrations")
+        get_migration_console().log(f"{ERROR_BADGE} | Some error(s) happened while running the schema migrations")
         for message in migration_error_msgs:
-            rprint(message)
+            get_migration_console().log(message)
         raise typer.Exit(1)
 
 
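Beyond swapping rprint for the shared console, these four hunks leave update_core_schema's control flow intact: validate the candidate schema, abort on violations, then apply the migrations and abort on errors. A self-contained illustration of that validate-then-apply gate, with stubs standing in for Infrahub's schema_validate_migrations / schema_apply_migrations tasks:

# Gate pattern only; the stubs are illustrative, not Infrahub APIs.
import asyncio


async def validate_migrations() -> list[str]:
    return []  # pretend validation found no violations


async def apply_migrations() -> list[str]:
    return []  # pretend every migration succeeded


async def update_schema() -> None:
    errors = await validate_migrations()
    if errors:
        raise SystemExit(1)  # abort before anything is applied
    migration_errors = await apply_migrations()
    if migration_errors:
        raise SystemExit(1)  # partial failures still exit non-zero


asyncio.run(update_schema())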
@@ -628,16 +631,16 @@ RETURN vertices, edges
         edge_csv_writer.writeheader()
 
         while has_more_data:
-            rprint("Retrieving batch of vertices and edges...", end="")
+            get_migration_console().print("Retrieving batch of vertices and edges...", end="")
             results = await db.execute_query(
                 query=query,
                 params={"kinds": kinds, "uuids": uuids, "limit": limit, "offset": offset},
             )
-            rprint("done. ", end="")
+            get_migration_console().print("done. ", end="")
             has_more_data = len(results) >= limit
             offset += limit
 
-            rprint("Writing batch to export files...", end="")
+            get_migration_console().print("Writing batch to export files...", end="")
             for result in results:
                 vertices = result.get("vertices")
                 for vertex in vertices:
@@ -668,10 +671,10 @@ RETURN vertices, edges
                         serial_edge[property_name] = value
                     edge_csv_writer.writerow(serial_edge)
                     all_db_ids.add(edge.element_id)
-            rprint("done.")
+            get_migration_console().log("done.")
 
-    rprint(f"{SUCCESS_BADGE} Export complete")
-    rprint(f"Export directory is here: {export_dir.absolute()}")
+    get_migration_console().log(f"{SUCCESS_BADGE} Export complete")
+    get_migration_console().log(f"Export directory is here: {export_dir.absolute()}")
     return export_dir
 
 
@@ -709,9 +712,9 @@ UNWIND $vertices AS vertex
 CREATE (v:ImportNode:%(node_labels)s {db_id: vertex.db_id})
 SET v = vertex
 """ % {"node_labels": ":".join(vertex_labels)}
-    rprint(f"Loading {len(vertex_dicts)} {vertex_labels} nodes...", end="")
+    get_migration_console().print(f"Loading {len(vertex_dicts)} {vertex_labels} nodes...", end="")
     await db.execute_query(query=vertex_import_query, params={"vertices": vertex_dicts})
-    rprint("done")
+    get_migration_console().log("done")
 
 
 async def load_edges(
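The export loop touched above pages through the graph with limit/offset parameters and keeps fetching while each batch comes back full. The same pattern in isolation, as a generic self-contained sketch:

# Generic sketch of the page-until-short-batch loop used by the export code.
from typing import Any, Awaitable, Callable


async def fetch_all(run_batch: Callable[[int, int], Awaitable[list[Any]]], limit: int = 1000) -> list[Any]:
    rows: list[Any] = []
    offset = 0
    has_more_data = True
    while has_more_data:
        batch = await run_batch(limit, offset)
        rows.extend(batch)
        has_more_data = len(batch) >= limit  # a short batch means the data is exhausted
        offset += limit
    return rows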
@@ -724,17 +727,17 @@ MATCH (b:ImportNode) WHERE b.db_id = toString(edge.end_node_id)
 CREATE (a)-[e:%(edge_type)s]->(b)
 SET e = edge.properties
 """ % {"edge_type": edge_type}
-    rprint(f"Loading {len(edge_dicts)} {edge_type} edges...", end="")
+    get_migration_console().print(f"Loading {len(edge_dicts)} {edge_type} edges...", end="")
    await db.execute_query(query=edges_import_query, params={"edges": edge_dicts})
-    rprint("done")
+    get_migration_console().log("done")
 
 
 async def load_export(db: InfrahubDatabase, export_dir: Path, query_limit: int = 1000) -> None:
     if not export_dir.exists():
-        rprint(f"{ERROR_BADGE} {export_dir} does not exist")
+        get_migration_console().log(f"{ERROR_BADGE} {export_dir} does not exist")
         raise typer.Exit(1)
     if not export_dir.is_dir():
-        rprint(f"{ERROR_BADGE} {export_dir} is not a directory")
+        get_migration_console().log(f"{ERROR_BADGE} {export_dir} is not a directory")
         raise typer.Exit(1)
     vertex_file: Path | None = None
     edge_file: Path | None = None
@@ -745,17 +748,17 @@ async def load_export(db: InfrahubDatabase, export_dir: Path, query_limit: int =
         elif export_file.name == "edges.csv":
             edge_file = export_file
     if not vertex_file or not vertex_file.exists() or not vertex_file.is_file():
-        rprint(f"{ERROR_BADGE} File 'vertices.csv' does not exist in the export directory")
+        get_migration_console().log(f"{ERROR_BADGE} File 'vertices.csv' does not exist in the export directory")
         raise typer.Exit(1)
     if not edge_file or not edge_file.exists() or not edge_file.is_file():
-        rprint(f"{ERROR_BADGE} File 'edges.csv' does not exist in the export directory")
+        get_migration_console().log(f"{ERROR_BADGE} File 'edges.csv' does not exist in the export directory")
         raise typer.Exit(1)
 
     # index massively improves time required to load a large export
     create_index_query = "CREATE RANGE INDEX import_node_db_id IF NOT EXISTS FOR (v:ImportNode) ON (v.db_id)"
     await db.execute_query(query=create_index_query)
 
-    rprint("Loading vertices...")
+    get_migration_console().log("Loading vertices...")
     vertices_by_labels_map: dict[frozenset[str], list[dict[str, Any]]] = defaultdict(list)
     with vertex_file.open() as file:
         csv_reader = DictReader(file)
@@ -770,9 +773,9 @@ async def load_export(db: InfrahubDatabase, export_dir: Path, query_limit: int =
 
     for labels, vertex_rows in vertices_by_labels_map.items():
         await load_vertices(db=db, vertex_labels=list(labels), vertex_dicts=vertex_rows)
-    rprint("Vertices loaded")
+    get_migration_console().log("Vertices loaded")
 
-    rprint("Loading edges...")
+    get_migration_console().log("Loading edges...")
     edges_by_type_map: dict[str, list[dict[str, Any]]] = defaultdict(list)
     with edge_file.open() as file:
         csv_reader = DictReader(file)
@@ -797,8 +800,8 @@ async def load_export(db: InfrahubDatabase, export_dir: Path, query_limit: int =
 
     for edge_type, edge_dicts in edges_by_type_map.items():
         await load_edges(db=db, edge_type=edge_type, edge_dicts=edge_dicts)
-    rprint("Edges loaded")
-    rprint(f"{SUCCESS_BADGE} Export loaded")
+    get_migration_console().log("Edges loaded")
+    get_migration_console().log(f"{SUCCESS_BADGE} Export loaded")
 
 
 @app.command(name="check")
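load_export buckets vertex rows by their complete label set before calling load_vertices once per bucket, using frozenset keys because label order is irrelevant and dict keys must be hashable. The same idea in isolation, assuming for illustration that labels arrive as a colon-separated string (the actual CSV column format is not shown in this diff):

# Isolated illustration of the frozenset bucketing used by load_export.
from collections import defaultdict
from typing import Any

rows = [
    {"labels": "Node:CoreAccount", "db_id": "1"},
    {"labels": "CoreAccount:Node", "db_id": "2"},  # same label set, different order
]

vertices_by_labels_map: dict[frozenset[str], list[dict[str, Any]]] = defaultdict(list)
for row in rows:
    vertices_by_labels_map[frozenset(row["labels"].split(":"))].append(row)

assert len(vertices_by_labels_map) == 1  # both rows land in the same bucket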
@@ -841,10 +844,10 @@ async def run_database_checks(db: InfrahubDatabase, output_dir: Path) -> None:
         db: The database object.
         output_dir: Directory to save detailed check results.
     """
-    rprint("Running database health checks...")
+    get_migration_console().log("Running database health checks...")
 
     # Check 1: Duplicate active relationships
-    rprint("\n[bold cyan]Check 1: Duplicate Active Relationships[/bold cyan]")
+    get_migration_console().log("\n[bold cyan]Check 1: Duplicate Active Relationships[/bold cyan]")
     duplicate_active_rels_query = """
 MATCH (a:Node)-[e1:IS_RELATED {status: "active"}]-(r:Relationship)-[e2:IS_RELATED {branch: e1.branch, status: "active"}]-(b:Node)
 WHERE a.uuid < b.uuid
@@ -868,7 +871,7 @@ async def run_database_checks(db: InfrahubDatabase, output_dir: Path) -> None:
 
     results = await db.execute_query(query=duplicate_active_rels_query)
     if results:
-        rprint(f"[red]Found {len(results)} duplicate active relationships[/red]")
+        get_migration_console().log(f"[red]Found {len(results)} duplicate active relationships[/red]")
         # Write detailed results to file
         output_file = output_dir / "duplicate_active_relationships.csv"
         with output_file.open(mode="w", newline="") as f:
@@ -878,12 +881,12 @@ async def run_database_checks(db: InfrahubDatabase, output_dir: Path) -> None:
             writer.writeheader()
             for result in results:
                 writer.writerow(dict(result))
-        rprint(f"  Detailed results written to: {output_file}")
+        get_migration_console().log(f"  Detailed results written to: {output_file}")
     else:
-        rprint(f"{SUCCESS_BADGE} No duplicate active relationships found")
+        get_migration_console().log(f"{SUCCESS_BADGE} No duplicate active relationships found")
 
     # Check 2: Duplicated relationship nodes
-    rprint("\n[bold cyan]Check 2: Duplicated Relationship Nodes[/bold cyan]")
+    get_migration_console().log("\n[bold cyan]Check 2: Duplicated Relationship Nodes[/bold cyan]")
     duplicate_rel_nodes_query = """
 MATCH (r:Relationship)
 WITH r.uuid AS r_uuid, COUNT(*) AS num_rels
@@ -896,7 +899,7 @@ async def run_database_checks(db: InfrahubDatabase, output_dir: Path) -> None:
 
     results = await db.execute_query(query=duplicate_rel_nodes_query)
     if results:
-        rprint(f"[red]Found {len(results)} duplicated relationship nodes[/red]")
+        get_migration_console().log(f"[red]Found {len(results)} duplicated relationship nodes[/red]")
         # Write detailed results to file
         output_file = output_dir / "duplicated_relationship_nodes.csv"
         with output_file.open(mode="w", newline="") as f:
@@ -904,12 +907,12 @@ async def run_database_checks(db: InfrahubDatabase, output_dir: Path) -> None:
             writer.writeheader()
             for result in results:
                 writer.writerow(dict(result))
-        rprint(f"  Detailed results written to: {output_file}")
+        get_migration_console().log(f"  Detailed results written to: {output_file}")
     else:
-        rprint(f"{SUCCESS_BADGE} No duplicated relationship nodes found")
+        get_migration_console().log(f"{SUCCESS_BADGE} No duplicated relationship nodes found")
 
     # Check 3: Duplicated edges
-    rprint("\n[bold cyan]Check 3: Duplicated Edges[/bold cyan]")
+    get_migration_console().log("\n[bold cyan]Check 3: Duplicated Edges[/bold cyan]")
     duplicate_edges_query = """
 MATCH (a)
 CALL (a) {
@@ -939,7 +942,7 @@ async def run_database_checks(db: InfrahubDatabase, output_dir: Path) -> None:
 
     results = await db.execute_query(query=duplicate_edges_query)
     if results:
-        rprint(f"[red]Found {len(results)} sets of duplicated edges[/red]")
+        get_migration_console().log(f"[red]Found {len(results)} sets of duplicated edges[/red]")
         # Write detailed results to file
         output_file = output_dir / "duplicated_edges.csv"
         with output_file.open(mode="w", newline="") as f:
@@ -950,12 +953,12 @@ async def run_database_checks(db: InfrahubDatabase, output_dir: Path) -> None:
             writer.writeheader()
             for result in results:
                 writer.writerow(dict(result))
-        rprint(f"  Detailed results written to: {output_file}")
+        get_migration_console().log(f"  Detailed results written to: {output_file}")
     else:
-        rprint(f"{SUCCESS_BADGE} No duplicated edges found")
+        get_migration_console().log(f"{SUCCESS_BADGE} No duplicated edges found")
 
     # Check 4: Orphaned Relationships
-    rprint("\n[bold cyan]Check 4: Orphaned Relationships[/bold cyan]")
+    get_migration_console().log("\n[bold cyan]Check 4: Orphaned Relationships[/bold cyan]")
     orphaned_rels_query = """
 MATCH (r:Relationship)-[:IS_RELATED]-(peer:Node)
 WITH DISTINCT r, peer
@@ -973,7 +976,7 @@ async def run_database_checks(db: InfrahubDatabase, output_dir: Path) -> None:
     """
     results = await db.execute_query(query=orphaned_rels_query)
     if results:
-        rprint(f"[red]Found {len(results)} orphaned Relationships[/red]")
+        get_migration_console().log(f"[red]Found {len(results)} orphaned Relationships[/red]")
         # Write detailed results to file
         output_file = output_dir / "orphaned_relationships.csv"
         with output_file.open(mode="w", newline="") as f:
@@ -984,9 +987,9 @@ async def run_database_checks(db: InfrahubDatabase, output_dir: Path) -> None:
             writer.writeheader()
             for result in results:
                 writer.writerow(dict(result))
-        rprint(f"  Detailed results written to: {output_file}")
+        get_migration_console().log(f"  Detailed results written to: {output_file}")
     else:
-        rprint(f"{SUCCESS_BADGE} No orphaned relationships found")
+        get_migration_console().log(f"{SUCCESS_BADGE} No orphaned relationships found")
 
-    rprint(f"\n{SUCCESS_BADGE} Database health checks completed")
-    rprint(f"Detailed results saved to: {output_dir.absolute()}")
+    get_migration_console().log(f"\n{SUCCESS_BADGE} Database health checks completed")
+    get_migration_console().log(f"Detailed results saved to: {output_dir.absolute()}")
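Each health check is an ordinary Cypher query whose findings land in a CSV file next to the console summary, so a check can also be replayed directly against the database. A sketch using the neo4j Python driver; the connection details are placeholders, and the WHERE/RETURN tail is an assumed completion of the query that this diff truncates:

# Standalone replay of the duplicated-relationship-nodes check (Check 2).
import asyncio

from neo4j import AsyncGraphDatabase

DUPLICATE_REL_NODES_QUERY = """
MATCH (r:Relationship)
WITH r.uuid AS r_uuid, COUNT(*) AS num_rels
WHERE num_rels > 1
RETURN r_uuid, num_rels
"""


async def main() -> None:
    # URI and credentials are assumptions; use your deployment's values.
    driver = AsyncGraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "admin"))
    try:
        async with driver.session() as session:
            result = await session.run(DUPLICATE_REL_NODES_QUERY)
            async for record in result:
                print(record["r_uuid"], record["num_rels"])
    finally:
        await driver.close()


asyncio.run(main())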