infrahub-server 1.5.0b1__py3-none-any.whl → 1.5.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- infrahub/api/dependencies.py +4 -13
- infrahub/api/internal.py +2 -0
- infrahub/api/oauth2.py +13 -19
- infrahub/api/oidc.py +15 -21
- infrahub/api/schema.py +24 -3
- infrahub/api/transformation.py +22 -20
- infrahub/artifacts/models.py +2 -1
- infrahub/auth.py +137 -3
- infrahub/cli/__init__.py +2 -0
- infrahub/cli/db.py +158 -155
- infrahub/cli/dev.py +118 -0
- infrahub/cli/tasks.py +46 -0
- infrahub/cli/upgrade.py +56 -9
- infrahub/computed_attribute/tasks.py +20 -8
- infrahub/core/attribute.py +10 -2
- infrahub/core/branch/enums.py +1 -1
- infrahub/core/branch/models.py +7 -3
- infrahub/core/branch/tasks.py +68 -7
- infrahub/core/constants/__init__.py +3 -0
- infrahub/core/diff/calculator.py +2 -2
- infrahub/core/diff/query/artifact.py +1 -0
- infrahub/core/diff/query/delete_query.py +9 -5
- infrahub/core/diff/query/field_summary.py +1 -0
- infrahub/core/diff/query/merge.py +39 -23
- infrahub/core/graph/__init__.py +1 -1
- infrahub/core/initialization.py +5 -2
- infrahub/core/migrations/__init__.py +3 -0
- infrahub/core/migrations/exceptions.py +4 -0
- infrahub/core/migrations/graph/__init__.py +12 -13
- infrahub/core/migrations/graph/load_schema_branch.py +21 -0
- infrahub/core/migrations/graph/m013_convert_git_password_credential.py +1 -1
- infrahub/core/migrations/graph/m037_index_attr_vals.py +11 -30
- infrahub/core/migrations/graph/m039_ipam_reconcile.py +9 -7
- infrahub/core/migrations/graph/m040_duplicated_attributes.py +81 -0
- infrahub/core/migrations/graph/m041_deleted_dup_edges.py +149 -0
- infrahub/core/migrations/graph/m042_profile_attrs_in_db.py +147 -0
- infrahub/core/migrations/graph/m043_create_hfid_display_label_in_db.py +164 -0
- infrahub/core/migrations/graph/m044_backfill_hfid_display_label_in_db.py +864 -0
- infrahub/core/migrations/query/__init__.py +7 -8
- infrahub/core/migrations/query/attribute_add.py +8 -6
- infrahub/core/migrations/query/attribute_remove.py +134 -0
- infrahub/core/migrations/runner.py +54 -0
- infrahub/core/migrations/schema/attribute_kind_update.py +9 -3
- infrahub/core/migrations/schema/attribute_supports_profile.py +90 -0
- infrahub/core/migrations/schema/node_attribute_add.py +30 -2
- infrahub/core/migrations/schema/node_attribute_remove.py +13 -109
- infrahub/core/migrations/schema/node_kind_update.py +2 -1
- infrahub/core/migrations/schema/node_remove.py +2 -1
- infrahub/core/migrations/schema/placeholder_dummy.py +3 -2
- infrahub/core/migrations/shared.py +62 -14
- infrahub/core/models.py +2 -2
- infrahub/core/node/__init__.py +42 -12
- infrahub/core/node/create.py +46 -63
- infrahub/core/node/lock_utils.py +70 -44
- infrahub/core/node/resource_manager/ip_address_pool.py +2 -1
- infrahub/core/node/resource_manager/ip_prefix_pool.py +2 -1
- infrahub/core/node/resource_manager/number_pool.py +2 -1
- infrahub/core/query/attribute.py +55 -0
- infrahub/core/query/diff.py +61 -16
- infrahub/core/query/ipam.py +16 -4
- infrahub/core/query/node.py +51 -43
- infrahub/core/query/relationship.py +1 -0
- infrahub/core/relationship/model.py +10 -5
- infrahub/core/schema/__init__.py +56 -0
- infrahub/core/schema/attribute_schema.py +4 -0
- infrahub/core/schema/definitions/core/check.py +1 -1
- infrahub/core/schema/definitions/core/transform.py +1 -1
- infrahub/core/schema/definitions/internal.py +2 -2
- infrahub/core/schema/generated/attribute_schema.py +2 -2
- infrahub/core/schema/manager.py +22 -1
- infrahub/core/schema/schema_branch.py +180 -22
- infrahub/core/schema/schema_branch_display.py +12 -0
- infrahub/core/schema/schema_branch_hfid.py +6 -0
- infrahub/core/validators/uniqueness/checker.py +2 -1
- infrahub/database/__init__.py +0 -13
- infrahub/database/graph.py +21 -0
- infrahub/display_labels/tasks.py +13 -7
- infrahub/events/branch_action.py +27 -1
- infrahub/generators/tasks.py +3 -7
- infrahub/git/base.py +4 -1
- infrahub/git/integrator.py +1 -1
- infrahub/git/models.py +2 -1
- infrahub/git/repository.py +22 -5
- infrahub/git/tasks.py +66 -10
- infrahub/git/utils.py +123 -1
- infrahub/graphql/analyzer.py +9 -0
- infrahub/graphql/api/endpoints.py +14 -4
- infrahub/graphql/manager.py +4 -9
- infrahub/graphql/mutations/branch.py +5 -0
- infrahub/graphql/mutations/convert_object_type.py +11 -1
- infrahub/graphql/mutations/display_label.py +17 -10
- infrahub/graphql/mutations/hfid.py +17 -10
- infrahub/graphql/mutations/ipam.py +54 -35
- infrahub/graphql/mutations/main.py +27 -28
- infrahub/graphql/mutations/proposed_change.py +6 -0
- infrahub/graphql/schema_sort.py +170 -0
- infrahub/graphql/types/branch.py +4 -1
- infrahub/graphql/types/enums.py +3 -0
- infrahub/hfid/tasks.py +13 -7
- infrahub/lock.py +52 -12
- infrahub/message_bus/types.py +3 -1
- infrahub/permissions/constants.py +2 -0
- infrahub/profiles/queries/get_profile_data.py +4 -5
- infrahub/proposed_change/tasks.py +66 -23
- infrahub/server.py +6 -2
- infrahub/services/__init__.py +2 -2
- infrahub/services/adapters/http/__init__.py +5 -0
- infrahub/services/adapters/workflow/worker.py +14 -3
- infrahub/task_manager/event.py +5 -0
- infrahub/task_manager/models.py +7 -0
- infrahub/task_manager/task.py +73 -0
- infrahub/trigger/setup.py +13 -4
- infrahub/trigger/tasks.py +3 -0
- infrahub/workers/dependencies.py +10 -1
- infrahub/workers/infrahub_async.py +10 -2
- infrahub/workflows/catalogue.py +8 -0
- infrahub/workflows/initialization.py +5 -0
- infrahub/workflows/utils.py +2 -1
- infrahub_sdk/analyzer.py +1 -1
- infrahub_sdk/batch.py +2 -2
- infrahub_sdk/branch.py +14 -2
- infrahub_sdk/checks.py +1 -1
- infrahub_sdk/client.py +15 -14
- infrahub_sdk/config.py +29 -2
- infrahub_sdk/ctl/branch.py +3 -0
- infrahub_sdk/ctl/cli_commands.py +2 -0
- infrahub_sdk/ctl/exceptions.py +1 -1
- infrahub_sdk/ctl/schema.py +22 -7
- infrahub_sdk/ctl/task.py +110 -0
- infrahub_sdk/exceptions.py +18 -18
- infrahub_sdk/graphql/query.py +2 -2
- infrahub_sdk/node/attribute.py +1 -1
- infrahub_sdk/node/property.py +1 -1
- infrahub_sdk/node/related_node.py +3 -3
- infrahub_sdk/node/relationship.py +4 -6
- infrahub_sdk/object_store.py +2 -2
- infrahub_sdk/operation.py +1 -1
- infrahub_sdk/protocols_generator/generator.py +1 -1
- infrahub_sdk/pytest_plugin/exceptions.py +9 -9
- infrahub_sdk/pytest_plugin/items/base.py +1 -1
- infrahub_sdk/pytest_plugin/items/check.py +1 -1
- infrahub_sdk/pytest_plugin/items/python_transform.py +1 -1
- infrahub_sdk/repository.py +1 -1
- infrahub_sdk/schema/__init__.py +33 -5
- infrahub_sdk/spec/models.py +7 -0
- infrahub_sdk/spec/object.py +41 -102
- infrahub_sdk/spec/processors/__init__.py +0 -0
- infrahub_sdk/spec/processors/data_processor.py +10 -0
- infrahub_sdk/spec/processors/factory.py +34 -0
- infrahub_sdk/spec/processors/range_expand_processor.py +56 -0
- infrahub_sdk/task/exceptions.py +4 -4
- infrahub_sdk/task/manager.py +2 -2
- infrahub_sdk/task/models.py +6 -4
- infrahub_sdk/timestamp.py +1 -1
- infrahub_sdk/transfer/exporter/json.py +1 -1
- infrahub_sdk/transfer/importer/json.py +1 -1
- infrahub_sdk/transforms.py +1 -1
- {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/METADATA +4 -2
- {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/RECORD +168 -152
- infrahub_testcontainers/container.py +144 -6
- infrahub_testcontainers/docker-compose-cluster.test.yml +5 -0
- infrahub_testcontainers/docker-compose.test.yml +5 -0
- infrahub_testcontainers/helpers.py +19 -4
- infrahub_testcontainers/models.py +8 -6
- infrahub_testcontainers/performance_test.py +6 -4
- infrahub/core/migrations/graph/m040_profile_attrs_in_db.py +0 -166
- infrahub/core/migrations/graph/m041_create_hfid_display_label_in_db.py +0 -97
- infrahub/core/migrations/graph/m042_backfill_hfid_display_label_in_db.py +0 -86
- {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/LICENSE.txt +0 -0
- {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/WHEEL +0 -0
- {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/entry_points.txt +0 -0
infrahub/cli/dev.py
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import importlib
|
|
4
|
+
import logging
|
|
5
|
+
from pathlib import Path # noqa: TC003
|
|
6
|
+
from typing import TYPE_CHECKING
|
|
7
|
+
|
|
8
|
+
import typer
|
|
9
|
+
from graphql import parse, print_ast, print_schema
|
|
10
|
+
from infrahub_sdk.async_typer import AsyncTyper
|
|
11
|
+
from rich.logging import RichHandler
|
|
12
|
+
|
|
13
|
+
from infrahub import config
|
|
14
|
+
from infrahub.core.initialization import (
|
|
15
|
+
first_time_initialization,
|
|
16
|
+
initialization,
|
|
17
|
+
)
|
|
18
|
+
from infrahub.core.schema import SchemaRoot, core_models, internal_schema
|
|
19
|
+
from infrahub.core.schema.schema_branch import SchemaBranch
|
|
20
|
+
from infrahub.core.utils import delete_all_nodes
|
|
21
|
+
from infrahub.graphql.manager import GraphQLSchemaManager
|
|
22
|
+
from infrahub.graphql.schema_sort import sort_schema_ast
|
|
23
|
+
from infrahub.log import get_logger
|
|
24
|
+
|
|
25
|
+
if TYPE_CHECKING:
|
|
26
|
+
from infrahub.cli.context import CliContext
|
|
27
|
+
|
|
28
|
+
app = AsyncTyper()
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
@app.command(name="export-graphql-schema")
async def export_graphql_schema(
    ctx: typer.Context,  # noqa: ARG001
    config_file: str = typer.Option("infrahub.toml", envvar="INFRAHUB_CONFIG"),
    out: Path = typer.Option("schema.graphql"),  # noqa: B008
) -> None:
    """Export the Core GraphQL schema to a file."""

    config.load_and_exit(config_file_name=config_file)

    # Merge the internal schema with the core models to obtain the full schema root.
    merged_root = SchemaRoot(**internal_schema).merge(schema=SchemaRoot(**core_models))

    branch = SchemaBranch(cache={}, name="default")
    branch.load_schema(schema=merged_root)
    branch.process()

    # Generate the GraphQL schema, round-trip it through the AST so it can be
    # sorted deterministically, then write the sorted SDL to the output path.
    manager = GraphQLSchemaManager(schema=branch)
    sdl_text = print_schema(manager.generate())
    sorted_ast = sort_schema_ast(parse(sdl_text))
    out.write_text(print_ast(sorted_ast))
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
@app.command(name="db-init")
async def database_init(
    ctx: typer.Context,
    config_file: str = typer.Option(
        "infrahub.toml", envvar="INFRAHUB_CONFIG", help="Location of the configuration file to use for Infrahub"
    ),
) -> None:
    """Erase the content of the database and initialize it with the core schema."""

    logger = get_logger()

    # --------------------------------------------------
    # CLEANUP
    # - For now we delete everything in the database
    # TODO, if possible try to implement this in an idempotent way
    # --------------------------------------------------

    logging.getLogger("neo4j").setLevel(logging.ERROR)
    config.load_and_exit(config_file_name=config_file)

    cli_context: CliContext = ctx.obj
    driver = await cli_context.init_db(retry=1)

    # Wipe and re-seed inside a single transaction so a failure leaves no partial state.
    async with driver.start_transaction() as db:
        logger.info("Delete All Nodes")
        await delete_all_nodes(db=db)
        await first_time_initialization(db=db)

    await driver.close()
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
@app.command(name="load-test-data")
async def load_test_data(
    ctx: typer.Context,
    config_file: str = typer.Option(
        "infrahub.toml", envvar="INFRAHUB_CONFIG", help="Location of the configuration file to use for Infrahub"
    ),
    dataset: str = "dataset01",
) -> None:
    """Load test data into the database from the `test_data` directory.

    The ``dataset`` argument names a module under ``infrahub.test_data``
    (e.g. ``dataset01``) that must expose an async ``load_data(db=...)``.
    """

    logging.getLogger("neo4j").setLevel(logging.ERROR)
    config.load_and_exit(config_file_name=config_file)

    context: CliContext = ctx.obj
    dbdriver = await context.init_db(retry=1)

    async with dbdriver.start_session() as db:
        await initialization(db=db)

        # Configure rich console logging before the dataset starts emitting messages.
        log_level = "DEBUG"
        FORMAT = "%(message)s"
        logging.basicConfig(level=log_level, format=FORMAT, datefmt="[%X]", handlers=[RichHandler()])
        # NOTE: a bare `logging.getLogger("infrahub")` call whose result was
        # discarded (a no-op) has been removed here.

        dataset_module = importlib.import_module(f"infrahub.test_data.{dataset}")
        await dataset_module.load_data(db=db)

    await dbdriver.close()
|
infrahub/cli/tasks.py
CHANGED
|
@@ -3,9 +3,11 @@ import logging
|
|
|
3
3
|
import typer
|
|
4
4
|
from infrahub_sdk.async_typer import AsyncTyper
|
|
5
5
|
from prefect.client.orchestration import get_client
|
|
6
|
+
from prefect.client.schemas.objects import StateType
|
|
6
7
|
|
|
7
8
|
from infrahub import config
|
|
8
9
|
from infrahub.services.adapters.workflow.worker import WorkflowWorkerExecution
|
|
10
|
+
from infrahub.task_manager.task import PrefectTask
|
|
9
11
|
from infrahub.tasks.dummy import DUMMY_FLOW, DummyInput
|
|
10
12
|
from infrahub.workflows.initialization import setup_task_manager
|
|
11
13
|
from infrahub.workflows.models import WorkerPoolDefinition
|
|
@@ -50,3 +52,47 @@ async def execute(
|
|
|
50
52
|
workflow=DUMMY_FLOW, parameters={"data": DummyInput(firstname="John", lastname="Doe")}
|
|
51
53
|
) # type: ignore[var-annotated]
|
|
52
54
|
print(result)
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
flush_app = AsyncTyper()
|
|
58
|
+
|
|
59
|
+
app.add_typer(flush_app, name="flush")
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
@flush_app.command()
|
|
63
|
+
async def flow_runs(
|
|
64
|
+
ctx: typer.Context, # noqa: ARG001
|
|
65
|
+
config_file: str = typer.Argument("infrahub.toml", envvar="INFRAHUB_CONFIG"),
|
|
66
|
+
days_to_keep: int = 30,
|
|
67
|
+
batch_size: int = 100,
|
|
68
|
+
) -> None:
|
|
69
|
+
"""Flush old task runs"""
|
|
70
|
+
logging.getLogger("infrahub").setLevel(logging.WARNING)
|
|
71
|
+
logging.getLogger("neo4j").setLevel(logging.ERROR)
|
|
72
|
+
logging.getLogger("prefect").setLevel(logging.ERROR)
|
|
73
|
+
|
|
74
|
+
config.load_and_exit(config_file_name=config_file)
|
|
75
|
+
|
|
76
|
+
await PrefectTask.delete_flow_runs(
|
|
77
|
+
days_to_keep=days_to_keep,
|
|
78
|
+
batch_size=batch_size,
|
|
79
|
+
)
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
@flush_app.command()
|
|
83
|
+
async def stale_runs(
|
|
84
|
+
ctx: typer.Context, # noqa: ARG001
|
|
85
|
+
config_file: str = typer.Argument("infrahub.toml", envvar="INFRAHUB_CONFIG"),
|
|
86
|
+
days_to_keep: int = 2,
|
|
87
|
+
batch_size: int = 100,
|
|
88
|
+
) -> None:
|
|
89
|
+
"""Flush stale task runs"""
|
|
90
|
+
logging.getLogger("infrahub").setLevel(logging.WARNING)
|
|
91
|
+
logging.getLogger("neo4j").setLevel(logging.ERROR)
|
|
92
|
+
logging.getLogger("prefect").setLevel(logging.ERROR)
|
|
93
|
+
|
|
94
|
+
config.load_and_exit(config_file_name=config_file)
|
|
95
|
+
|
|
96
|
+
await PrefectTask.delete_flow_runs(
|
|
97
|
+
states=[StateType.RUNNING], delete=False, days_to_keep=days_to_keep, batch_size=batch_size
|
|
98
|
+
)
|
infrahub/cli/upgrade.py
CHANGED
|
@@ -8,13 +8,19 @@ import typer
|
|
|
8
8
|
from deepdiff import DeepDiff
|
|
9
9
|
from infrahub_sdk.async_typer import AsyncTyper
|
|
10
10
|
from prefect.client.orchestration import get_client
|
|
11
|
-
from rich import print as rprint
|
|
12
11
|
|
|
13
12
|
from infrahub import config
|
|
14
|
-
from infrahub.core.initialization import
|
|
13
|
+
from infrahub.core.initialization import (
|
|
14
|
+
create_anonymous_role,
|
|
15
|
+
create_default_account_groups,
|
|
16
|
+
get_root_node,
|
|
17
|
+
initialize_registry,
|
|
18
|
+
)
|
|
15
19
|
from infrahub.core.manager import NodeManager
|
|
20
|
+
from infrahub.core.migrations.shared import get_migration_console
|
|
16
21
|
from infrahub.core.protocols import CoreAccount, CoreObjectPermission
|
|
17
22
|
from infrahub.dependencies.registry import build_component_registry
|
|
23
|
+
from infrahub.lock import initialize_lock
|
|
18
24
|
from infrahub.menu.menu import default_menu
|
|
19
25
|
from infrahub.menu.models import MenuDict
|
|
20
26
|
from infrahub.menu.repository import MenuRepository
|
|
@@ -26,13 +32,22 @@ from infrahub.workflows.initialization import (
|
|
|
26
32
|
setup_worker_pools,
|
|
27
33
|
)
|
|
28
34
|
|
|
29
|
-
from .db import
|
|
35
|
+
from .db import (
|
|
36
|
+
detect_migration_to_run,
|
|
37
|
+
initialize_internal_schema,
|
|
38
|
+
mark_branches_needing_rebase,
|
|
39
|
+
migrate_database,
|
|
40
|
+
trigger_rebase_branches,
|
|
41
|
+
update_core_schema,
|
|
42
|
+
)
|
|
30
43
|
|
|
31
44
|
if TYPE_CHECKING:
|
|
32
45
|
from infrahub.cli.context import CliContext
|
|
46
|
+
from infrahub.core.branch.models import Branch
|
|
33
47
|
from infrahub.database import InfrahubDatabase
|
|
34
48
|
|
|
35
49
|
app = AsyncTyper()
|
|
50
|
+
console = get_migration_console()
|
|
36
51
|
|
|
37
52
|
|
|
38
53
|
@app.command(name="upgrade")
|
|
@@ -40,6 +55,10 @@ async def upgrade_cmd(
|
|
|
40
55
|
ctx: typer.Context,
|
|
41
56
|
config_file: str = typer.Argument("infrahub.toml", envvar="INFRAHUB_CONFIG"),
|
|
42
57
|
check: bool = typer.Option(False, help="Check the state of the system without upgrading."),
|
|
58
|
+
rebase_branches: bool = typer.Option(False, help="Rebase and apply migrations to branches if required."),
|
|
59
|
+
interactive: bool = typer.Option(
|
|
60
|
+
False, help="Use interactive prompt to accept or deny rebase of individual branches."
|
|
61
|
+
),
|
|
43
62
|
) -> None:
|
|
44
63
|
"""Upgrade Infrahub to the latest version."""
|
|
45
64
|
|
|
@@ -54,9 +73,12 @@ async def upgrade_cmd(
|
|
|
54
73
|
dbdriver = await context.init_db(retry=1)
|
|
55
74
|
|
|
56
75
|
await initialize_registry(db=dbdriver)
|
|
76
|
+
initialize_lock()
|
|
57
77
|
|
|
58
78
|
build_component_registry()
|
|
59
79
|
|
|
80
|
+
root_node = await get_root_node(db=dbdriver)
|
|
81
|
+
|
|
60
82
|
# NOTE add step to validate if the database and the task manager are reachable
|
|
61
83
|
|
|
62
84
|
# -------------------------------------------
|
|
@@ -67,9 +89,14 @@ async def upgrade_cmd(
|
|
|
67
89
|
# Upgrade Infrahub Database and Schema
|
|
68
90
|
# -------------------------------------------
|
|
69
91
|
|
|
70
|
-
|
|
92
|
+
migrations = await detect_migration_to_run(current_graph_version=root_node.graph_version)
|
|
93
|
+
if check:
|
|
94
|
+
await dbdriver.close()
|
|
95
|
+
return
|
|
96
|
+
|
|
97
|
+
if not await migrate_database(db=dbdriver, initialize=False, migrations=migrations):
|
|
71
98
|
# A migration failed, stop the upgrade process
|
|
72
|
-
|
|
99
|
+
console.log("Upgrade cancelled due to migration failure.")
|
|
73
100
|
await dbdriver.close()
|
|
74
101
|
return
|
|
75
102
|
|
|
@@ -91,6 +118,26 @@ async def upgrade_cmd(
|
|
|
91
118
|
await setup_deployments(client=client)
|
|
92
119
|
await trigger_configure_all()
|
|
93
120
|
|
|
121
|
+
# -------------------------------------------
|
|
122
|
+
# Perform branch rebase and apply migrations to them
|
|
123
|
+
# -------------------------------------------
|
|
124
|
+
branches = await mark_branches_needing_rebase(db=dbdriver)
|
|
125
|
+
plural = len(branches) != 1
|
|
126
|
+
get_migration_console().log(
|
|
127
|
+
f"Found {len(branches)} {'branches' if plural else 'branch'} that {'need' if plural else 'needs'} to be rebased"
|
|
128
|
+
)
|
|
129
|
+
|
|
130
|
+
if rebase_branches:
|
|
131
|
+
branches_to_rebase: list[Branch] = []
|
|
132
|
+
if not interactive:
|
|
133
|
+
branches_to_rebase = branches
|
|
134
|
+
else:
|
|
135
|
+
for branch in branches:
|
|
136
|
+
if typer.confirm(f"Rebase branch {branch.name}?"):
|
|
137
|
+
branches_to_rebase.append(branch)
|
|
138
|
+
|
|
139
|
+
await trigger_rebase_branches(db=dbdriver, branches=branches_to_rebase)
|
|
140
|
+
|
|
94
141
|
await dbdriver.close()
|
|
95
142
|
|
|
96
143
|
|
|
@@ -107,21 +154,21 @@ async def upgrade_menu(db: InfrahubDatabase) -> None:
|
|
|
107
154
|
diff_menu = DeepDiff(menu_items.to_rest(), default_menu_dict.to_rest(), ignore_order=True)
|
|
108
155
|
|
|
109
156
|
if not diff_menu:
|
|
110
|
-
|
|
157
|
+
console.log("Menu Up to date, nothing to update")
|
|
111
158
|
return
|
|
112
159
|
|
|
113
160
|
await menu_repository.update_menu(existing_menu=menu_items, new_menu=default_menu_dict, menu_nodes=menu_nodes)
|
|
114
|
-
|
|
161
|
+
console.log("Menu has been updated")
|
|
115
162
|
|
|
116
163
|
|
|
117
164
|
async def upgrade_permissions(db: InfrahubDatabase) -> None:
|
|
118
165
|
existing_permissions = await NodeManager.query(schema=CoreObjectPermission, db=db, limit=1)
|
|
119
166
|
if existing_permissions:
|
|
120
|
-
|
|
167
|
+
console.log("Permissions Up to date, nothing to update")
|
|
121
168
|
return
|
|
122
169
|
|
|
123
170
|
await setup_permissions(db=db)
|
|
124
|
-
|
|
171
|
+
console.log("Permissions have been updated")
|
|
125
172
|
|
|
126
173
|
|
|
127
174
|
async def setup_permissions(db: InfrahubDatabase) -> None:
|
|
@@ -2,6 +2,7 @@ from __future__ import annotations
|
|
|
2
2
|
|
|
3
3
|
from typing import TYPE_CHECKING
|
|
4
4
|
|
|
5
|
+
from infrahub_sdk.exceptions import URLNotFoundError
|
|
5
6
|
from infrahub_sdk.protocols import CoreTransformPython
|
|
6
7
|
from infrahub_sdk.template import Jinja2Template
|
|
7
8
|
from prefect import flow
|
|
@@ -104,7 +105,7 @@ async def process_transform(
|
|
|
104
105
|
) # type: ignore[misc]
|
|
105
106
|
|
|
106
107
|
data = await client.query_gql_query(
|
|
107
|
-
name=transform.query.
|
|
108
|
+
name=transform.query.id,
|
|
108
109
|
branch_name=branch_name,
|
|
109
110
|
variables={"id": object_id},
|
|
110
111
|
update_group=True,
|
|
@@ -177,12 +178,17 @@ async def computed_attribute_jinja2_update_value(
|
|
|
177
178
|
log.debug(f"Ignoring to update {obj} with existing value on {attribute_name}={value}")
|
|
178
179
|
return
|
|
179
180
|
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
181
|
+
try:
|
|
182
|
+
await client.execute_graphql(
|
|
183
|
+
query=UPDATE_ATTRIBUTE,
|
|
184
|
+
variables={"id": obj.node_id, "kind": node_kind, "attribute": attribute_name, "value": value},
|
|
185
|
+
branch_name=branch_name,
|
|
186
|
+
)
|
|
187
|
+
log.info(f"Updating computed attribute {node_kind}.{attribute_name}='{value}' ({obj.node_id})")
|
|
188
|
+
except URLNotFoundError:
|
|
189
|
+
log.warning(
|
|
190
|
+
f"Update of computed attribute {node_kind}.{attribute_name} failed for branch {branch_name} (not found)"
|
|
191
|
+
)
|
|
186
192
|
|
|
187
193
|
|
|
188
194
|
@flow(
|
|
@@ -229,7 +235,13 @@ async def process_jinja2(
|
|
|
229
235
|
|
|
230
236
|
for id_filter in computed_macro.node_filters:
|
|
231
237
|
query = attribute_graphql.render_graphql_query(query_filter=id_filter, filter_id=object_id)
|
|
232
|
-
|
|
238
|
+
try:
|
|
239
|
+
response = await client.execute_graphql(query=query, branch_name=branch_name)
|
|
240
|
+
except URLNotFoundError:
|
|
241
|
+
log.warning(
|
|
242
|
+
f"Process computed attributes for {computed_attribute_kind}.{computed_attribute_name} failed for branch {branch_name} (not found)"
|
|
243
|
+
)
|
|
244
|
+
return
|
|
233
245
|
output = attribute_graphql.parse_response(response=response)
|
|
234
246
|
found.extend(output)
|
|
235
247
|
|
infrahub/core/attribute.py
CHANGED
|
@@ -18,6 +18,7 @@ from infrahub.core.changelog.models import AttributeChangelog
|
|
|
18
18
|
from infrahub.core.constants import NULL_VALUE, AttributeDBNodeType, BranchSupportType, RelationshipStatus
|
|
19
19
|
from infrahub.core.property import FlagPropertyMixin, NodePropertyData, NodePropertyMixin
|
|
20
20
|
from infrahub.core.query.attribute import (
|
|
21
|
+
AttributeClearNodePropertyQuery,
|
|
21
22
|
AttributeGetQuery,
|
|
22
23
|
AttributeUpdateFlagQuery,
|
|
23
24
|
AttributeUpdateNodePropertyQuery,
|
|
@@ -491,6 +492,12 @@ class BaseAttribute(FlagPropertyMixin, NodePropertyMixin):
|
|
|
491
492
|
)
|
|
492
493
|
await query.execute(db=db)
|
|
493
494
|
|
|
495
|
+
if needs_clear:
|
|
496
|
+
query = await AttributeClearNodePropertyQuery.init(
|
|
497
|
+
db=db, attr=self, at=update_at, prop_name=prop_name, prop_id=database_prop_id
|
|
498
|
+
)
|
|
499
|
+
await query.execute(db=db)
|
|
500
|
+
|
|
494
501
|
# set the to time on the previously active edge
|
|
495
502
|
rel = current_attr_result.get(f"rel_{prop_name}")
|
|
496
503
|
if rel and rel.get("branch") == branch.name:
|
|
@@ -581,7 +588,7 @@ class BaseAttribute(FlagPropertyMixin, NodePropertyMixin):
|
|
|
581
588
|
|
|
582
589
|
return value
|
|
583
590
|
|
|
584
|
-
async def from_graphql(self, data: dict, db: InfrahubDatabase) -> bool:
|
|
591
|
+
async def from_graphql(self, data: dict, db: InfrahubDatabase, process_pools: bool = True) -> bool:
|
|
585
592
|
"""Update attr from GraphQL payload"""
|
|
586
593
|
|
|
587
594
|
changed = False
|
|
@@ -595,7 +602,8 @@ class BaseAttribute(FlagPropertyMixin, NodePropertyMixin):
|
|
|
595
602
|
changed = True
|
|
596
603
|
elif "from_pool" in data:
|
|
597
604
|
self.from_pool = data["from_pool"]
|
|
598
|
-
|
|
605
|
+
if process_pools:
|
|
606
|
+
await self.node.handle_pool(db=db, attribute=self, errors=[])
|
|
599
607
|
changed = True
|
|
600
608
|
|
|
601
609
|
if changed and self.is_from_profile:
|
infrahub/core/branch/enums.py
CHANGED
infrahub/core/branch/models.py
CHANGED
|
@@ -6,9 +6,8 @@ from typing import TYPE_CHECKING, Any, Optional, Self, Union
|
|
|
6
6
|
from pydantic import Field, field_validator
|
|
7
7
|
|
|
8
8
|
from infrahub.core.branch.enums import BranchStatus
|
|
9
|
-
from infrahub.core.constants import
|
|
10
|
-
|
|
11
|
-
)
|
|
9
|
+
from infrahub.core.constants import GLOBAL_BRANCH_NAME
|
|
10
|
+
from infrahub.core.graph import GRAPH_VERSION
|
|
12
11
|
from infrahub.core.models import SchemaBranchHash # noqa: TC001
|
|
13
12
|
from infrahub.core.node.standard import StandardNode
|
|
14
13
|
from infrahub.core.query import QueryType
|
|
@@ -46,6 +45,7 @@ class Branch(StandardNode):
|
|
|
46
45
|
is_isolated: bool = True
|
|
47
46
|
schema_changed_at: Optional[str] = None
|
|
48
47
|
schema_hash: Optional[SchemaBranchHash] = None
|
|
48
|
+
graph_version: int | None = None
|
|
49
49
|
|
|
50
50
|
_exclude_attrs: list[str] = ["id", "uuid", "owner"]
|
|
51
51
|
|
|
@@ -261,6 +261,10 @@ class Branch(StandardNode):
|
|
|
261
261
|
|
|
262
262
|
return start, end
|
|
263
263
|
|
|
264
|
+
async def create(self, db: InfrahubDatabase) -> bool:
|
|
265
|
+
self.graph_version = GRAPH_VERSION
|
|
266
|
+
return await super().create(db=db)
|
|
267
|
+
|
|
264
268
|
async def delete(self, db: InfrahubDatabase) -> None:
|
|
265
269
|
if self.is_default:
|
|
266
270
|
raise ValidationError(f"Unable to delete {self.name} it is the default branch.")
|
infrahub/core/branch/tasks.py
CHANGED
|
@@ -12,6 +12,7 @@ from infrahub import lock
|
|
|
12
12
|
from infrahub.context import InfrahubContext # noqa: TC001 needed for prefect flow
|
|
13
13
|
from infrahub.core import registry
|
|
14
14
|
from infrahub.core.branch import Branch
|
|
15
|
+
from infrahub.core.branch.enums import BranchStatus
|
|
15
16
|
from infrahub.core.changelog.diff import DiffChangelogCollector, MigrationTracker
|
|
16
17
|
from infrahub.core.constants import MutationAction
|
|
17
18
|
from infrahub.core.diff.coordinator import DiffCoordinator
|
|
@@ -21,7 +22,10 @@ from infrahub.core.diff.merger.merger import DiffMerger
|
|
|
21
22
|
from infrahub.core.diff.model.path import BranchTrackingId, EnrichedDiffRoot, EnrichedDiffRootMetadata
|
|
22
23
|
from infrahub.core.diff.models import RequestDiffUpdate
|
|
23
24
|
from infrahub.core.diff.repository.repository import DiffRepository
|
|
25
|
+
from infrahub.core.graph import GRAPH_VERSION
|
|
24
26
|
from infrahub.core.merge import BranchMerger
|
|
27
|
+
from infrahub.core.migrations.exceptions import MigrationFailureError
|
|
28
|
+
from infrahub.core.migrations.runner import MigrationRunner
|
|
25
29
|
from infrahub.core.migrations.schema.models import SchemaApplyMigrationData
|
|
26
30
|
from infrahub.core.migrations.schema.tasks import schema_apply_migrations
|
|
27
31
|
from infrahub.core.timestamp import Timestamp
|
|
@@ -29,7 +33,13 @@ from infrahub.core.validators.determiner import ConstraintValidatorDeterminer
|
|
|
29
33
|
from infrahub.core.validators.models.validate_migration import SchemaValidateMigrationData
|
|
30
34
|
from infrahub.core.validators.tasks import schema_validate_migrations
|
|
31
35
|
from infrahub.dependencies.registry import get_component_registry
|
|
32
|
-
from infrahub.events.branch_action import
|
|
36
|
+
from infrahub.events.branch_action import (
|
|
37
|
+
BranchCreatedEvent,
|
|
38
|
+
BranchDeletedEvent,
|
|
39
|
+
BranchMergedEvent,
|
|
40
|
+
BranchMigratedEvent,
|
|
41
|
+
BranchRebasedEvent,
|
|
42
|
+
)
|
|
33
43
|
from infrahub.events.models import EventMeta, InfrahubEvent
|
|
34
44
|
from infrahub.events.node_action import get_node_event
|
|
35
45
|
from infrahub.exceptions import BranchNotFoundError, ValidationError
|
|
@@ -49,8 +59,57 @@ from infrahub.workflows.catalogue import (
|
|
|
49
59
|
from infrahub.workflows.utils import add_tags
|
|
50
60
|
|
|
51
61
|
|
|
62
|
+
@flow(name="branch-migrate", flow_run_name="Apply migrations to branch {branch}")
|
|
63
|
+
async def migrate_branch(branch: str, context: InfrahubContext, send_events: bool = True) -> None:
|
|
64
|
+
await add_tags(branches=[branch])
|
|
65
|
+
|
|
66
|
+
database = await get_database()
|
|
67
|
+
async with database.start_session() as db:
|
|
68
|
+
log = get_run_logger()
|
|
69
|
+
|
|
70
|
+
obj = await Branch.get_by_name(db=db, name=branch)
|
|
71
|
+
|
|
72
|
+
if obj.graph_version == GRAPH_VERSION:
|
|
73
|
+
log.info(f"Branch '{obj.name}' has graph version {obj.graph_version}, no migrations to apply")
|
|
74
|
+
return
|
|
75
|
+
|
|
76
|
+
migration_runner = MigrationRunner(branch=obj)
|
|
77
|
+
if not migration_runner.has_migrations():
|
|
78
|
+
log.info(f"No migrations detected for branch '{obj.name}'")
|
|
79
|
+
obj.graph_version = GRAPH_VERSION
|
|
80
|
+
await obj.save(db=db)
|
|
81
|
+
return
|
|
82
|
+
|
|
83
|
+
# Branch status will remain as so if the migration process fails
|
|
84
|
+
# This will help user to know that a branch is in an invalid state to be used properly and that actions need to be taken
|
|
85
|
+
if obj.status != BranchStatus.NEED_UPGRADE_REBASE:
|
|
86
|
+
obj.status = BranchStatus.NEED_UPGRADE_REBASE
|
|
87
|
+
await obj.save(db=db)
|
|
88
|
+
|
|
89
|
+
try:
|
|
90
|
+
log.info(f"Running migrations for branch '{obj.name}'")
|
|
91
|
+
await migration_runner.run(db=db)
|
|
92
|
+
except MigrationFailureError as exc:
|
|
93
|
+
log.error(f"Failed to run migrations for branch '{obj.name}': {exc.errors}")
|
|
94
|
+
raise
|
|
95
|
+
|
|
96
|
+
if obj.status == BranchStatus.NEED_UPGRADE_REBASE:
|
|
97
|
+
obj.status = BranchStatus.OPEN
|
|
98
|
+
obj.graph_version = GRAPH_VERSION
|
|
99
|
+
await obj.save(db=db)
|
|
100
|
+
|
|
101
|
+
if send_events:
|
|
102
|
+
event_service = await get_event_service()
|
|
103
|
+
await event_service.send(
|
|
104
|
+
BranchMigratedEvent(
|
|
105
|
+
branch_name=obj.name, branch_id=str(obj.uuid), meta=EventMeta(branch=obj, context=context)
|
|
106
|
+
)
|
|
107
|
+
)
|
|
108
|
+
|
|
109
|
+
|
|
52
110
|
@flow(name="branch-rebase", flow_run_name="Rebase branch {branch}")
|
|
53
|
-
async def rebase_branch(branch: str, context: InfrahubContext) -> None: # noqa: PLR0915
|
|
111
|
+
async def rebase_branch(branch: str, context: InfrahubContext, send_events: bool = True) -> None: # noqa: PLR0915
|
|
112
|
+
workflow = get_workflow()
|
|
54
113
|
database = await get_database()
|
|
55
114
|
async with database.start_session() as db:
|
|
56
115
|
log = get_run_logger()
|
|
@@ -69,7 +128,7 @@ async def rebase_branch(branch: str, context: InfrahubContext) -> None: # noqa:
|
|
|
69
128
|
diff_repository=diff_repository,
|
|
70
129
|
source_branch=obj,
|
|
71
130
|
diff_locker=DiffLocker(),
|
|
72
|
-
workflow=
|
|
131
|
+
workflow=workflow,
|
|
73
132
|
)
|
|
74
133
|
|
|
75
134
|
enriched_diff_metadata = await diff_coordinator.update_branch_diff(base_branch=base_branch, diff_branch=obj)
|
|
@@ -156,15 +215,17 @@ async def rebase_branch(branch: str, context: InfrahubContext) -> None: # noqa:
|
|
|
156
215
|
target_branch_name=registry.default_branch,
|
|
157
216
|
)
|
|
158
217
|
if ipam_node_details:
|
|
159
|
-
await
|
|
218
|
+
await workflow.submit_workflow(
|
|
160
219
|
workflow=IPAM_RECONCILIATION,
|
|
161
220
|
context=context,
|
|
162
221
|
parameters={"branch": obj.name, "ipam_node_details": ipam_node_details},
|
|
163
222
|
)
|
|
164
223
|
|
|
165
|
-
await
|
|
166
|
-
|
|
167
|
-
|
|
224
|
+
await migrate_branch(branch=branch, context=context, send_events=send_events)
|
|
225
|
+
await workflow.submit_workflow(workflow=DIFF_REFRESH_ALL, context=context, parameters={"branch_name": obj.name})
|
|
226
|
+
|
|
227
|
+
if not send_events:
|
|
228
|
+
return
|
|
168
229
|
|
|
169
230
|
# -------------------------------------------------------------
|
|
170
231
|
# Generate an event to indicate that a branch has been rebased
|
|
@@ -37,6 +37,7 @@ RESERVED_ATTR_REL_NAMES = [
|
|
|
37
37
|
"rels",
|
|
38
38
|
"save",
|
|
39
39
|
"hfid",
|
|
40
|
+
"process_pools",
|
|
40
41
|
]
|
|
41
42
|
|
|
42
43
|
RESERVED_ATTR_GEN_NAMES = ["type"]
|
|
@@ -50,6 +51,7 @@ class EventType(InfrahubStringEnum):
|
|
|
50
51
|
BRANCH_CREATED = f"{EVENT_NAMESPACE}.branch.created"
|
|
51
52
|
BRANCH_DELETED = f"{EVENT_NAMESPACE}.branch.deleted"
|
|
52
53
|
BRANCH_MERGED = f"{EVENT_NAMESPACE}.branch.merged"
|
|
54
|
+
BRANCH_MIGRATED = f"{EVENT_NAMESPACE}.branch.migrated"
|
|
53
55
|
BRANCH_REBASED = f"{EVENT_NAMESPACE}.branch.rebased"
|
|
54
56
|
|
|
55
57
|
SCHEMA_UPDATED = f"{EVENT_NAMESPACE}.schema.updated"
|
|
@@ -99,6 +101,7 @@ class GlobalPermissions(InfrahubStringEnum):
|
|
|
99
101
|
MANAGE_PERMISSIONS = "manage_permissions"
|
|
100
102
|
MANAGE_REPOSITORIES = "manage_repositories"
|
|
101
103
|
OVERRIDE_CONTEXT = "override_context"
|
|
104
|
+
UPDATE_OBJECT_HFID_DISPLAY_LABEL = "update_object_hfid_display_label"
|
|
102
105
|
|
|
103
106
|
|
|
104
107
|
class PermissionAction(InfrahubStringEnum):
|
infrahub/core/diff/calculator.py
CHANGED
|
@@ -141,8 +141,8 @@ class DiffCalculator:
|
|
|
141
141
|
to_time=to_time,
|
|
142
142
|
previous_node_field_specifiers=previous_node_specifiers,
|
|
143
143
|
)
|
|
144
|
-
node_limit = int(config.SETTINGS.database.query_size_limit / 10)
|
|
145
|
-
fields_limit = int(config.SETTINGS.database.query_size_limit / 3)
|
|
144
|
+
node_limit = max(int(config.SETTINGS.database.query_size_limit / 10), 1)
|
|
145
|
+
fields_limit = max(int(config.SETTINGS.database.query_size_limit / 3), 1)
|
|
146
146
|
properties_limit = config.SETTINGS.database.query_size_limit
|
|
147
147
|
|
|
148
148
|
calculation_request = DiffCalculationRequest(
|
|
@@ -20,10 +20,14 @@ class EnrichedDiffDeleteQuery(Query):
|
|
|
20
20
|
diff_filter = "WHERE d_root.uuid IN $diff_root_uuids"
|
|
21
21
|
|
|
22
22
|
query = """
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
23
|
+
MATCH (d_root:DiffRoot)
|
|
24
|
+
%(diff_filter)s
|
|
25
|
+
OPTIONAL MATCH (d_root)-[*]->(diff_thing)
|
|
26
|
+
WITH DISTINCT d_root, diff_thing
|
|
27
|
+
ORDER BY elementId(diff_thing)
|
|
28
|
+
CALL (diff_thing) {
|
|
29
|
+
DETACH DELETE diff_thing
|
|
30
|
+
} IN TRANSACTIONS
|
|
31
|
+
DETACH DELETE d_root
|
|
28
32
|
""" % {"diff_filter": diff_filter}
|
|
29
33
|
self.add_to_query(query=query)
|