infrahub-server 1.4.13__py3-none-any.whl → 1.5.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions exactly as they appear in the public registry.
- infrahub/actions/tasks.py +208 -16
- infrahub/api/artifact.py +3 -0
- infrahub/api/diff/diff.py +1 -1
- infrahub/api/internal.py +2 -0
- infrahub/api/query.py +2 -0
- infrahub/api/schema.py +27 -3
- infrahub/auth.py +5 -5
- infrahub/cli/__init__.py +2 -0
- infrahub/cli/db.py +160 -157
- infrahub/cli/dev.py +118 -0
- infrahub/cli/upgrade.py +56 -9
- infrahub/computed_attribute/tasks.py +19 -7
- infrahub/config.py +7 -2
- infrahub/core/attribute.py +35 -24
- infrahub/core/branch/enums.py +1 -1
- infrahub/core/branch/models.py +9 -5
- infrahub/core/branch/needs_rebase_status.py +11 -0
- infrahub/core/branch/tasks.py +72 -10
- infrahub/core/changelog/models.py +2 -10
- infrahub/core/constants/__init__.py +4 -0
- infrahub/core/constants/infrahubkind.py +1 -0
- infrahub/core/convert_object_type/object_conversion.py +201 -0
- infrahub/core/convert_object_type/repository_conversion.py +89 -0
- infrahub/core/convert_object_type/schema_mapping.py +27 -3
- infrahub/core/diff/model/path.py +4 -0
- infrahub/core/diff/payload_builder.py +1 -1
- infrahub/core/diff/query/artifact.py +1 -0
- infrahub/core/diff/query/field_summary.py +1 -0
- infrahub/core/graph/__init__.py +1 -1
- infrahub/core/initialization.py +7 -4
- infrahub/core/manager.py +3 -81
- infrahub/core/migrations/__init__.py +3 -0
- infrahub/core/migrations/exceptions.py +4 -0
- infrahub/core/migrations/graph/__init__.py +11 -10
- infrahub/core/migrations/graph/load_schema_branch.py +21 -0
- infrahub/core/migrations/graph/m013_convert_git_password_credential.py +1 -1
- infrahub/core/migrations/graph/m037_index_attr_vals.py +11 -30
- infrahub/core/migrations/graph/m039_ipam_reconcile.py +9 -7
- infrahub/core/migrations/graph/m042_profile_attrs_in_db.py +147 -0
- infrahub/core/migrations/graph/m043_create_hfid_display_label_in_db.py +164 -0
- infrahub/core/migrations/graph/m044_backfill_hfid_display_label_in_db.py +864 -0
- infrahub/core/migrations/query/__init__.py +7 -8
- infrahub/core/migrations/query/attribute_add.py +8 -6
- infrahub/core/migrations/query/attribute_remove.py +134 -0
- infrahub/core/migrations/runner.py +54 -0
- infrahub/core/migrations/schema/attribute_kind_update.py +9 -3
- infrahub/core/migrations/schema/attribute_supports_profile.py +90 -0
- infrahub/core/migrations/schema/node_attribute_add.py +26 -5
- infrahub/core/migrations/schema/node_attribute_remove.py +13 -109
- infrahub/core/migrations/schema/node_kind_update.py +2 -1
- infrahub/core/migrations/schema/node_remove.py +2 -1
- infrahub/core/migrations/schema/placeholder_dummy.py +3 -2
- infrahub/core/migrations/shared.py +66 -19
- infrahub/core/models.py +2 -2
- infrahub/core/node/__init__.py +207 -54
- infrahub/core/node/create.py +53 -49
- infrahub/core/node/lock_utils.py +124 -0
- infrahub/core/node/node_property_attribute.py +230 -0
- infrahub/core/node/resource_manager/ip_address_pool.py +2 -1
- infrahub/core/node/resource_manager/ip_prefix_pool.py +2 -1
- infrahub/core/node/resource_manager/number_pool.py +2 -1
- infrahub/core/node/standard.py +1 -1
- infrahub/core/property.py +11 -0
- infrahub/core/protocols.py +8 -1
- infrahub/core/query/attribute.py +82 -15
- infrahub/core/query/ipam.py +16 -4
- infrahub/core/query/node.py +66 -188
- infrahub/core/query/relationship.py +44 -26
- infrahub/core/query/subquery.py +0 -8
- infrahub/core/relationship/model.py +69 -24
- infrahub/core/schema/__init__.py +56 -0
- infrahub/core/schema/attribute_schema.py +4 -2
- infrahub/core/schema/basenode_schema.py +42 -2
- infrahub/core/schema/definitions/core/__init__.py +2 -0
- infrahub/core/schema/definitions/core/check.py +1 -1
- infrahub/core/schema/definitions/core/generator.py +2 -0
- infrahub/core/schema/definitions/core/group.py +16 -2
- infrahub/core/schema/definitions/core/repository.py +7 -0
- infrahub/core/schema/definitions/core/transform.py +1 -1
- infrahub/core/schema/definitions/internal.py +12 -3
- infrahub/core/schema/generated/attribute_schema.py +2 -2
- infrahub/core/schema/generated/base_node_schema.py +6 -1
- infrahub/core/schema/manager.py +3 -0
- infrahub/core/schema/node_schema.py +1 -0
- infrahub/core/schema/relationship_schema.py +0 -1
- infrahub/core/schema/schema_branch.py +295 -10
- infrahub/core/schema/schema_branch_display.py +135 -0
- infrahub/core/schema/schema_branch_hfid.py +120 -0
- infrahub/core/validators/aggregated_checker.py +1 -1
- infrahub/database/graph.py +21 -0
- infrahub/display_labels/__init__.py +0 -0
- infrahub/display_labels/gather.py +48 -0
- infrahub/display_labels/models.py +240 -0
- infrahub/display_labels/tasks.py +192 -0
- infrahub/display_labels/triggers.py +22 -0
- infrahub/events/branch_action.py +27 -1
- infrahub/events/group_action.py +1 -1
- infrahub/events/node_action.py +1 -1
- infrahub/generators/constants.py +7 -0
- infrahub/generators/models.py +38 -12
- infrahub/generators/tasks.py +34 -16
- infrahub/git/base.py +38 -1
- infrahub/git/integrator.py +22 -14
- infrahub/graphql/api/dependencies.py +2 -4
- infrahub/graphql/api/endpoints.py +16 -6
- infrahub/graphql/app.py +2 -4
- infrahub/graphql/initialization.py +2 -3
- infrahub/graphql/manager.py +213 -137
- infrahub/graphql/middleware.py +12 -0
- infrahub/graphql/mutations/branch.py +16 -0
- infrahub/graphql/mutations/computed_attribute.py +110 -3
- infrahub/graphql/mutations/convert_object_type.py +44 -13
- infrahub/graphql/mutations/display_label.py +118 -0
- infrahub/graphql/mutations/generator.py +25 -7
- infrahub/graphql/mutations/hfid.py +125 -0
- infrahub/graphql/mutations/ipam.py +73 -41
- infrahub/graphql/mutations/main.py +61 -178
- infrahub/graphql/mutations/profile.py +195 -0
- infrahub/graphql/mutations/proposed_change.py +8 -1
- infrahub/graphql/mutations/relationship.py +2 -2
- infrahub/graphql/mutations/repository.py +22 -83
- infrahub/graphql/mutations/resource_manager.py +2 -2
- infrahub/graphql/mutations/webhook.py +1 -1
- infrahub/graphql/queries/resource_manager.py +1 -1
- infrahub/graphql/registry.py +173 -0
- infrahub/graphql/resolvers/resolver.py +2 -0
- infrahub/graphql/schema.py +8 -1
- infrahub/graphql/schema_sort.py +170 -0
- infrahub/graphql/types/branch.py +4 -1
- infrahub/graphql/types/enums.py +3 -0
- infrahub/groups/tasks.py +1 -1
- infrahub/hfid/__init__.py +0 -0
- infrahub/hfid/gather.py +48 -0
- infrahub/hfid/models.py +240 -0
- infrahub/hfid/tasks.py +191 -0
- infrahub/hfid/triggers.py +22 -0
- infrahub/lock.py +119 -42
- infrahub/locks/__init__.py +0 -0
- infrahub/locks/tasks.py +37 -0
- infrahub/patch/plan_writer.py +2 -2
- infrahub/permissions/constants.py +2 -0
- infrahub/profiles/__init__.py +0 -0
- infrahub/profiles/node_applier.py +101 -0
- infrahub/profiles/queries/__init__.py +0 -0
- infrahub/profiles/queries/get_profile_data.py +98 -0
- infrahub/profiles/tasks.py +63 -0
- infrahub/proposed_change/tasks.py +24 -5
- infrahub/repositories/__init__.py +0 -0
- infrahub/repositories/create_repository.py +113 -0
- infrahub/server.py +9 -1
- infrahub/services/__init__.py +8 -5
- infrahub/services/adapters/workflow/worker.py +5 -2
- infrahub/task_manager/event.py +5 -0
- infrahub/task_manager/models.py +7 -0
- infrahub/tasks/registry.py +6 -4
- infrahub/trigger/catalogue.py +4 -0
- infrahub/trigger/models.py +2 -0
- infrahub/trigger/setup.py +13 -4
- infrahub/trigger/tasks.py +6 -0
- infrahub/webhook/models.py +1 -1
- infrahub/workers/dependencies.py +3 -1
- infrahub/workers/infrahub_async.py +5 -1
- infrahub/workflows/catalogue.py +118 -3
- infrahub/workflows/initialization.py +21 -0
- infrahub/workflows/models.py +17 -2
- infrahub_sdk/branch.py +17 -8
- infrahub_sdk/checks.py +1 -1
- infrahub_sdk/client.py +376 -95
- infrahub_sdk/config.py +29 -2
- infrahub_sdk/convert_object_type.py +61 -0
- infrahub_sdk/ctl/branch.py +3 -0
- infrahub_sdk/ctl/check.py +2 -3
- infrahub_sdk/ctl/cli_commands.py +20 -12
- infrahub_sdk/ctl/config.py +8 -2
- infrahub_sdk/ctl/generator.py +6 -3
- infrahub_sdk/ctl/graphql.py +184 -0
- infrahub_sdk/ctl/repository.py +39 -1
- infrahub_sdk/ctl/schema.py +40 -10
- infrahub_sdk/ctl/task.py +110 -0
- infrahub_sdk/ctl/utils.py +4 -0
- infrahub_sdk/ctl/validate.py +5 -3
- infrahub_sdk/diff.py +4 -5
- infrahub_sdk/exceptions.py +2 -0
- infrahub_sdk/generator.py +7 -1
- infrahub_sdk/graphql/__init__.py +12 -0
- infrahub_sdk/graphql/constants.py +1 -0
- infrahub_sdk/graphql/plugin.py +85 -0
- infrahub_sdk/graphql/query.py +77 -0
- infrahub_sdk/{graphql.py → graphql/renderers.py} +88 -75
- infrahub_sdk/graphql/utils.py +40 -0
- infrahub_sdk/node/attribute.py +2 -0
- infrahub_sdk/node/node.py +28 -20
- infrahub_sdk/node/relationship.py +1 -3
- infrahub_sdk/playback.py +1 -2
- infrahub_sdk/protocols.py +54 -6
- infrahub_sdk/pytest_plugin/plugin.py +7 -4
- infrahub_sdk/pytest_plugin/utils.py +40 -0
- infrahub_sdk/repository.py +1 -2
- infrahub_sdk/schema/__init__.py +70 -4
- infrahub_sdk/schema/main.py +1 -0
- infrahub_sdk/schema/repository.py +8 -0
- infrahub_sdk/spec/models.py +7 -0
- infrahub_sdk/spec/object.py +54 -6
- infrahub_sdk/spec/processors/__init__.py +0 -0
- infrahub_sdk/spec/processors/data_processor.py +10 -0
- infrahub_sdk/spec/processors/factory.py +34 -0
- infrahub_sdk/spec/processors/range_expand_processor.py +56 -0
- infrahub_sdk/spec/range_expansion.py +118 -0
- infrahub_sdk/task/models.py +6 -4
- infrahub_sdk/timestamp.py +18 -6
- infrahub_sdk/transforms.py +1 -1
- {infrahub_server-1.4.13.dist-info → infrahub_server-1.5.0.dist-info}/METADATA +9 -10
- {infrahub_server-1.4.13.dist-info → infrahub_server-1.5.0.dist-info}/RECORD +221 -165
- infrahub_testcontainers/container.py +114 -2
- infrahub_testcontainers/docker-compose-cluster.test.yml +5 -0
- infrahub_testcontainers/docker-compose.test.yml +5 -0
- infrahub_testcontainers/models.py +2 -2
- infrahub_testcontainers/performance_test.py +4 -4
- infrahub/core/convert_object_type/conversion.py +0 -134
- {infrahub_server-1.4.13.dist-info → infrahub_server-1.5.0.dist-info}/LICENSE.txt +0 -0
- {infrahub_server-1.4.13.dist-info → infrahub_server-1.5.0.dist-info}/WHEEL +0 -0
- {infrahub_server-1.4.13.dist-info → infrahub_server-1.5.0.dist-info}/entry_points.txt +0 -0
infrahub/core/branch/tasks.py
CHANGED
@@ -12,6 +12,7 @@ from infrahub import lock
 from infrahub.context import InfrahubContext  # noqa: TC001 needed for prefect flow
 from infrahub.core import registry
 from infrahub.core.branch import Branch
+from infrahub.core.branch.enums import BranchStatus
 from infrahub.core.changelog.diff import DiffChangelogCollector, MigrationTracker
 from infrahub.core.constants import MutationAction
 from infrahub.core.diff.coordinator import DiffCoordinator
@@ -21,7 +22,10 @@ from infrahub.core.diff.merger.merger import DiffMerger
 from infrahub.core.diff.model.path import BranchTrackingId, EnrichedDiffRoot, EnrichedDiffRootMetadata
 from infrahub.core.diff.models import RequestDiffUpdate
 from infrahub.core.diff.repository.repository import DiffRepository
+from infrahub.core.graph import GRAPH_VERSION
 from infrahub.core.merge import BranchMerger
+from infrahub.core.migrations.exceptions import MigrationFailureError
+from infrahub.core.migrations.runner import MigrationRunner
 from infrahub.core.migrations.schema.models import SchemaApplyMigrationData
 from infrahub.core.migrations.schema.tasks import schema_apply_migrations
 from infrahub.core.timestamp import Timestamp
@@ -29,10 +33,17 @@ from infrahub.core.validators.determiner import ConstraintValidatorDeterminer
 from infrahub.core.validators.models.validate_migration import SchemaValidateMigrationData
 from infrahub.core.validators.tasks import schema_validate_migrations
 from infrahub.dependencies.registry import get_component_registry
-from infrahub.events.branch_action import
+from infrahub.events.branch_action import (
+    BranchCreatedEvent,
+    BranchDeletedEvent,
+    BranchMergedEvent,
+    BranchMigratedEvent,
+    BranchRebasedEvent,
+)
 from infrahub.events.models import EventMeta, InfrahubEvent
 from infrahub.events.node_action import get_node_event
 from infrahub.exceptions import BranchNotFoundError, ValidationError
+from infrahub.generators.constants import GeneratorDefinitionRunSource
 from infrahub.graphql.mutations.models import BranchCreateModel  # noqa: TC001
 from infrahub.workers.dependencies import get_component, get_database, get_event_service, get_workflow
 from infrahub.workflows.catalogue import (
@@ -48,8 +59,57 @@ from infrahub.workflows.catalogue import (
 from infrahub.workflows.utils import add_tags


+@flow(name="branch-migrate", flow_run_name="Apply migrations to branch {branch}")
+async def migrate_branch(branch: str, context: InfrahubContext, send_events: bool = True) -> None:
+    await add_tags(branches=[branch])
+
+    database = await get_database()
+    async with database.start_session() as db:
+        log = get_run_logger()
+
+        obj = await Branch.get_by_name(db=db, name=branch)
+
+        if obj.graph_version == GRAPH_VERSION:
+            log.info(f"Branch '{obj.name}' has graph version {obj.graph_version}, no migrations to apply")
+            return
+
+        migration_runner = MigrationRunner(branch=obj)
+        if not migration_runner.has_migrations():
+            log.info(f"No migrations detected for branch '{obj.name}'")
+            obj.graph_version = GRAPH_VERSION
+            await obj.save(db=db)
+            return
+
+        # Branch status will remain as so if the migration process fails
+        # This will help user to know that a branch is in an invalid state to be used properly and that actions need to be taken
+        if obj.status != BranchStatus.NEED_UPGRADE_REBASE:
+            obj.status = BranchStatus.NEED_UPGRADE_REBASE
+            await obj.save(db=db)
+
+        try:
+            log.info(f"Running migrations for branch '{obj.name}'")
+            await migration_runner.run(db=db)
+        except MigrationFailureError as exc:
+            log.error(f"Failed to run migrations for branch '{obj.name}': {exc.errors}")
+            raise
+
+        if obj.status == BranchStatus.NEED_UPGRADE_REBASE:
+            obj.status = BranchStatus.OPEN
+            obj.graph_version = GRAPH_VERSION
+            await obj.save(db=db)
+
+        if send_events:
+            event_service = await get_event_service()
+            await event_service.send(
+                BranchMigratedEvent(
+                    branch_name=obj.name, branch_id=str(obj.uuid), meta=EventMeta(branch=obj, context=context)
+                )
+            )
+
+
 @flow(name="branch-rebase", flow_run_name="Rebase branch {branch}")
-async def rebase_branch(branch: str, context: InfrahubContext) -> None:  # noqa: PLR0915
+async def rebase_branch(branch: str, context: InfrahubContext, send_events: bool = True) -> None:  # noqa: PLR0915
+    workflow = get_workflow()
     database = await get_database()
     async with database.start_session() as db:
         log = get_run_logger()
@@ -68,7 +128,7 @@ async def rebase_branch(branch: str, context: InfrahubContext) -> None:  # noqa:
             diff_repository=diff_repository,
             source_branch=obj,
             diff_locker=DiffLocker(),
-            workflow=
+            workflow=workflow,
         )

         enriched_diff_metadata = await diff_coordinator.update_branch_diff(base_branch=base_branch, diff_branch=obj)
@@ -155,15 +215,17 @@ async def rebase_branch(branch: str, context: InfrahubContext) -> None:  # noqa:
             target_branch_name=registry.default_branch,
         )
         if ipam_node_details:
-            await
+            await workflow.submit_workflow(
                 workflow=IPAM_RECONCILIATION,
                 context=context,
                 parameters={"branch": obj.name, "ipam_node_details": ipam_node_details},
             )

-        await
-
-
+        await migrate_branch(branch=branch, context=context, send_events=send_events)
+        await workflow.submit_workflow(workflow=DIFF_REFRESH_ALL, context=context, parameters={"branch_name": obj.name})
+
+        if not send_events:
+            return

         # -------------------------------------------------------------
         # Generate an event to indicate that a branch has been rebased
@@ -345,7 +407,7 @@ async def create_branch(model: BranchCreateModel, context: InfrahubContext) -> N
     async with database.start_session() as db:
         try:
             await Branch.get_by_name(db=db, name=model.name)
-            raise
+            raise ValidationError(f"The branch {model.name} already exists")
         except BranchNotFoundError:
             pass

@@ -356,7 +418,7 @@ async def create_branch(model: BranchCreateModel, context: InfrahubContext) -> N
             obj = Branch(**data_dict)
         except pydantic.ValidationError as exc:
             error_msgs = [f"invalid field {error['loc'][0]}: {error['msg']}" for error in exc.errors()]
-            raise
+            raise ValidationError("\n".join(error_msgs)) from exc

         async with lock.registry.local_schema_lock():
             # Copy the schema from the origin branch and set the hash and the schema_changed_at value
@@ -437,7 +499,7 @@ async def post_process_branch_merge(source_branch: str, target_branch: str, cont
         await get_workflow().submit_workflow(
             workflow=TRIGGER_GENERATOR_DEFINITION_RUN,
             context=context,
-            parameters={"branch": target_branch},
+            parameters={"branch": target_branch, "source": GeneratorDefinitionRunSource.MERGE},
         )

         for diff_root in branch_diff_roots:
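The new `branch-migrate` flow and the `send_events` flag threaded through `rebase_branch` let callers run graph migrations with or without emitting events. Below is a minimal sketch of how these flows could be invoked directly, assuming the flow functions and the `InfrahubContext` import shown in the diff above; the call sites themselves are hypothetical.

```python
# Hypothetical call sites; migrate_branch and rebase_branch are the flows added/changed above.
from infrahub.context import InfrahubContext
from infrahub.core.branch.tasks import migrate_branch, rebase_branch


async def upgrade_branch_quietly(branch_name: str, context: InfrahubContext) -> None:
    # Apply pending graph migrations without emitting a BranchMigratedEvent.
    await migrate_branch(branch=branch_name, context=context, send_events=False)


async def rebase_and_notify(branch_name: str, context: InfrahubContext) -> None:
    # Default behaviour: rebase, run migrations via migrate_branch(), then emit events.
    await rebase_branch(branch=branch_name, context=context, send_events=True)
```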
infrahub/core/changelog/models.py
CHANGED
@@ -596,11 +596,7 @@ class RelationshipChangelogGetter:
         secondaries: list[NodeChangelog] = []
         peer_relation = peer_schema.get_relationship_by_identifier(id=str(rel_schema.identifier), raise_on_error=False)
         if peer_relation:
-            node_changelog = NodeChangelog(
-                node_id=peer_id,
-                node_kind=peer_kind,
-                display_label="n/a",
-            )
+            node_changelog = NodeChangelog(node_id=peer_id, node_kind=peer_kind, display_label="n/a")
             if peer_relation.cardinality == RelationshipCardinality.ONE:
                 node_changelog.relationships[peer_relation.name] = RelationshipCardinalityOneChangelog(
                     name=peer_relation.name,
@@ -634,11 +630,7 @@ class RelationshipChangelogGetter:
         secondaries: list[NodeChangelog] = []
         peer_relation = peer_schema.get_relationship_by_identifier(id=str(rel_schema.identifier), raise_on_error=False)
         if peer_relation:
-            node_changelog = NodeChangelog(
-                node_id=peer_id,
-                node_kind=peer_kind,
-                display_label="n/a",
-            )
+            node_changelog = NodeChangelog(node_id=peer_id, node_kind=peer_kind, display_label="n/a")
             if peer_relation.cardinality == RelationshipCardinality.ONE:
                 node_changelog.relationships[peer_relation.name] = RelationshipCardinalityOneChangelog(
                     name=peer_relation.name,
infrahub/core/constants/__init__.py
CHANGED
@@ -37,6 +37,7 @@ RESERVED_ATTR_REL_NAMES = [
     "rels",
     "save",
     "hfid",
+    "process_pools",
 ]

 RESERVED_ATTR_GEN_NAMES = ["type"]
@@ -50,6 +51,7 @@ class EventType(InfrahubStringEnum):
     BRANCH_CREATED = f"{EVENT_NAMESPACE}.branch.created"
     BRANCH_DELETED = f"{EVENT_NAMESPACE}.branch.deleted"
     BRANCH_MERGED = f"{EVENT_NAMESPACE}.branch.merged"
+    BRANCH_MIGRATED = f"{EVENT_NAMESPACE}.branch.migrated"
     BRANCH_REBASED = f"{EVENT_NAMESPACE}.branch.rebased"

     SCHEMA_UPDATED = f"{EVENT_NAMESPACE}.schema.updated"
@@ -99,6 +101,7 @@ class GlobalPermissions(InfrahubStringEnum):
     MANAGE_PERMISSIONS = "manage_permissions"
     MANAGE_REPOSITORIES = "manage_repositories"
     OVERRIDE_CONTEXT = "override_context"
+    UPDATE_OBJECT_HFID_DISPLAY_LABEL = "update_object_hfid_display_label"


 class PermissionAction(InfrahubStringEnum):
@@ -387,3 +390,4 @@ DEFAULT_REL_IDENTIFIER_LENGTH = 128

 OBJECT_TEMPLATE_RELATIONSHIP_NAME = "object_template"
 OBJECT_TEMPLATE_NAME_ATTR = "template_name"
+PROFILE_NODE_RELATIONSHIP_IDENTIFIER = "node__profile"
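Alongside the new reserved name, permission, and relationship identifier, `EventType` gains a `BRANCH_MIGRATED` member built from the same `EVENT_NAMESPACE` prefix as the other branch events. A small illustrative filter, assuming only the enum shown above; the helper itself is hypothetical.

```python
# Hypothetical helper; EventType is the enum extended in the hunk above.
from infrahub.core.constants import EventType

BRANCH_LIFECYCLE_EVENTS = {
    EventType.BRANCH_CREATED.value,
    EventType.BRANCH_DELETED.value,
    EventType.BRANCH_MERGED.value,
    EventType.BRANCH_MIGRATED.value,  # new in 1.5.0
    EventType.BRANCH_REBASED.value,
}


def is_branch_lifecycle_event(event_name: str) -> bool:
    # Matches event names of the form f"{EVENT_NAMESPACE}.branch.<action>".
    return event_name in BRANCH_LIFECYCLE_EVENTS
```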
infrahub/core/constants/infrahubkind.py
CHANGED
@@ -28,6 +28,7 @@ GENERATORDEFINITION = "CoreGeneratorDefinition"
 GENERATORINSTANCE = "CoreGeneratorInstance"
 GENERATORVALIDATOR = "CoreGeneratorValidator"
 GENERATORGROUP = "CoreGeneratorGroup"
+GENERATORAWAREGROUP = "CoreGeneratorAwareGroup"
 GENERICGROUP = "CoreGroup"
 GLOBALPERMISSION = "CoreGlobalPermission"
 GRAPHQLQUERY = "CoreGraphQLQuery"
infrahub/core/convert_object_type/object_conversion.py
ADDED
@@ -0,0 +1,201 @@
+from typing import Any, assert_never
+
+from infrahub_sdk.convert_object_type import ConversionFieldInput, ConversionFieldValue
+
+from infrahub.core.attribute import BaseAttribute
+from infrahub.core.branch import Branch
+from infrahub.core.branch.enums import BranchStatus
+from infrahub.core.constants import BranchSupportType, RelationshipCardinality
+from infrahub.core.manager import NodeManager
+from infrahub.core.node import Node
+from infrahub.core.node.create import create_node
+from infrahub.core.query.relationship import GetAllPeersIds
+from infrahub.core.query.resource_manager import PoolChangeReserved
+from infrahub.core.relationship import RelationshipManager
+from infrahub.core.schema import NodeSchema
+from infrahub.core.timestamp import Timestamp
+from infrahub.database import InfrahubDatabase
+from infrahub.message_bus.messages import RefreshRegistryBranches
+from infrahub.tasks.registry import update_branch_registry
+from infrahub.workers.dependencies import get_message_bus
+
+
+def _get_conversion_field_raw_value(conv_field_value: ConversionFieldValue) -> Any:
+    if conv_field_value.attribute_value is not None:
+        return conv_field_value.attribute_value
+    if conv_field_value.peer_id is not None:
+        return conv_field_value.peer_id
+    if conv_field_value.peers_ids is not None:
+        return conv_field_value.peers_ids
+    raise ValueError("ConversionFieldValue has not been validated correctly.")
+
+
+async def get_out_rels_peers_ids(node: Node, db: InfrahubDatabase, at: Timestamp) -> list[str]:
+    all_peers_ids: list[str] = []
+    for name in node._relationships:
+        relm: RelationshipManager = getattr(node, name)
+        peers = await relm.get_db_peers(db=db, at=at)
+        all_peers_ids.extend([str(peer.peer_id) for peer in peers])
+    return all_peers_ids
+
+
+async def build_data_new_node(db: InfrahubDatabase, mapping: dict[str, ConversionFieldInput], node: Node) -> dict:
+    """Value of a given field on the target kind to convert is either an input source attribute/relationship of the source node,
+    or a raw value."""
+
+    data = {}
+    for dest_field_name, conv_field_input in mapping.items():
+        if conv_field_input.source_field is not None:
+            # Fetch the value of the corresponding field from the node being converted.
+            item = getattr(node, conv_field_input.source_field)
+            if isinstance(item, BaseAttribute):
+                data[dest_field_name] = item.value
+            elif isinstance(item, RelationshipManager):
+                if item.schema.cardinality == RelationshipCardinality.ONE:
+                    peer = await item.get_peer(db=db)
+                    if peer is not None:
+                        data[dest_field_name] = {"id": peer.id}
+                    # else, relationship is optional, and if the target relationship is mandatory an error will be raised during creation
+                elif item.schema.cardinality == RelationshipCardinality.MANY:
+                    data[dest_field_name] = [{"id": peer.id} for _, peer in (await item.get_peers(db=db)).items()]
+                else:
+                    assert_never(item.schema.cardinality)
+        elif conv_field_input.data is not None:
+            data[dest_field_name] = _get_conversion_field_raw_value(conv_field_input.data)
+        elif conv_field_input.use_default_value is True:
+            pass  # default value will be used automatically when creating the node
+        else:
+            raise ValueError("ConversionFieldInput has not been validated correctly.")
+    return data
+
+
+async def get_unidirectional_rels_peers_ids(
+    node: Node, branch: Branch, db: InfrahubDatabase, at: Timestamp
+) -> list[str]:
+    """
+    Returns peers ids of nodes connected to input `node` through an incoming unidirectional relationship.
+    """
+
+    out_rels_identifier = [rel.identifier for rel in node.get_schema().relationships]
+    branch_agnostic = node.get_schema().branch == BranchSupportType.AGNOSTIC
+    query = await GetAllPeersIds.init(
+        db=db,
+        node_id=node.id,
+        branch=branch,
+        exclude_identifiers=out_rels_identifier,
+        branch_agnostic=branch_agnostic,
+        at=at,
+    )
+    await query.execute(db=db)
+    return query.get_peers_uuids()
+
+
+async def _get_other_active_branches(db: InfrahubDatabase) -> list[Branch]:
+    branches = await Branch.get_list(db=db)
+    return [branch for branch in branches if not (branch.is_global or branch.is_default)]
+
+
+def _has_pass_thru_aware_attributes(node_schema: NodeSchema, mapping: dict[str, ConversionFieldInput]) -> bool:
+    aware_attributes = [attr for attr in node_schema.attributes if attr.branch != BranchSupportType.AGNOSTIC]
+    aware_attributes_pass_thru = [
+        attr.name for attr in aware_attributes if attr.name in mapping and mapping[attr.name].source_field is not None
+    ]
+    return len(aware_attributes_pass_thru) > 0
+
+
+async def validate_conversion(
+    deleted_node: Node, branch: Branch, db: InfrahubDatabase, timestamp_before_conversion: Timestamp
+) -> None:
+    deleted_node_out_rels_peer_ids = await get_out_rels_peers_ids(
+        node=deleted_node, db=db, at=timestamp_before_conversion
+    )
+    deleted_node_unidir_rels_peer_ids = await get_unidirectional_rels_peers_ids(
+        node=deleted_node, db=db, branch=branch, at=timestamp_before_conversion
+    )
+
+    # Make sure relationships with constraints are not broken by retrieving them
+    peers_ids = deleted_node_out_rels_peer_ids + deleted_node_unidir_rels_peer_ids
+    peers = await NodeManager.get_many(ids=peers_ids, db=db, prefetch_relationships=True, branch=branch)
+    for peer in peers.values():
+        peer.validate_relationships()
+
+
+async def convert_and_validate_object_type(
+    node: Node,
+    target_schema: NodeSchema,
+    mapping: dict[str, ConversionFieldInput],
+    branch: Branch,
+    db: InfrahubDatabase,
+) -> Node:
+    async with db.start_transaction() as dbt:
+        timestamp_before_conversion = Timestamp()
+        new_node = await convert_object_type(
+            node=node, target_schema=target_schema, mapping=mapping, branch=branch, db=dbt
+        )
+        await validate_conversion(
+            deleted_node=node, branch=branch, db=dbt, timestamp_before_conversion=timestamp_before_conversion
+        )
+
+    # Refresh outside the transaction otherwise other workers would pull outdated branch objects.
+    message_bus = await get_message_bus()
+    await message_bus.send(RefreshRegistryBranches())
+
+    return new_node
+
+
+async def convert_object_type(
+    node: Node,
+    target_schema: NodeSchema,
+    mapping: dict[str, ConversionFieldInput],
+    branch: Branch,
+    db: InfrahubDatabase,
+) -> Node:
+    """Delete the node and return the new created one. If creation fails, the node is not deleted, and raise an error.
+    An extra check is performed on input node peers relationships to make sure they are still valid."""
+
+    node_schema = node.get_schema()
+    if not isinstance(node_schema, NodeSchema):
+        raise ValueError(f"Only a node with a NodeSchema can be converted, got {type(node_schema)}")
+
+    # Delete the node, so we delete relationships with peers as well, which might temporarily break cardinality constraints
+    # but they should be restored when creating the new node.
+    deleted_nodes = await NodeManager.delete(db=db, branch=branch, nodes=[node], cascade_delete=False)
+    if len(deleted_nodes) != 1:
+        raise ValueError(f"Deleted {len(deleted_nodes)} nodes instead of 1")
+
+    data_new_node = await build_data_new_node(db, mapping, node)
+
+    if node_schema.branch == BranchSupportType.AGNOSTIC and _has_pass_thru_aware_attributes(
+        node_schema=node_schema, mapping=mapping
+    ):
+        if not branch.is_default:
+            raise ValueError(
+                f"Conversion of {node_schema.kind} is not allowed on branch {branch.name} because it is agnostic and has aware attributes"
+            )

+        # When converting an agnostic node with aware attributes, we need to put other branches in NEED_REBASE state
+        # as aware attributes do not exist in other branches after conversion
+        other_branches = await _get_other_active_branches(db=db)
+        for br in other_branches:
+            br.status = BranchStatus.NEED_REBASE
+            await br.save(db=db)
+            # Registry of other API workers are updated outside the transaction
+            await update_branch_registry(db=db, branch=br)
+
+    node_created = await create_node(
+        data=data_new_node,
+        db=db,
+        branch=branch,
+        schema=target_schema,
+    )
+
+    # If the node had some value reserved in any Pools / Resource Manager, we need to change the identifier of the reservation(s)
+    query = await PoolChangeReserved.init(
+        db=db,
+        existing_identifier=node.get_id(),
+        new_identifier=node_created.get_id(),
+        branch=branch,
+    )
+    await query.execute(db=db)
+
+    return node_created
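This new module replaces the removed `infrahub/core/convert_object_type/conversion.py` (see the file list above) and exposes `convert_and_validate_object_type` as the transactional entry point. A rough usage sketch follows; the kinds, field names, and lookup calls (`NodeManager.get_one`, `registry.schema.get_node_schema`) are assumptions for illustration, and the exact way `ConversionFieldInput` is constructed may differ in the SDK.

```python
# Hypothetical caller, e.g. invoked from a GraphQL mutation resolver.
from infrahub_sdk.convert_object_type import ConversionFieldInput

from infrahub.core import registry
from infrahub.core.convert_object_type.object_conversion import convert_and_validate_object_type
from infrahub.core.manager import NodeManager


async def convert_node(db, branch, node_id: str):
    node = await NodeManager.get_one(db=db, branch=branch, id=node_id)  # assumed lookup
    target_schema = registry.schema.get_node_schema(name="InfraFirewall", branch=branch)  # assumed kind
    mapping = {
        "name": ConversionFieldInput(source_field="name"),     # copy attribute from the source node
        "site": ConversionFieldInput(source_field="site"),     # copy relationship peers
        "role": ConversionFieldInput(use_default_value=True),  # let the target schema default apply
    }
    # Deletes the old node, creates the new one, and validates peers' relationship
    # constraints inside a single transaction before refreshing other workers.
    return await convert_and_validate_object_type(
        node=node, target_schema=target_schema, mapping=mapping, branch=branch, db=db
    )
```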
infrahub/core/convert_object_type/repository_conversion.py
ADDED
@@ -0,0 +1,89 @@
+from infrahub import lock
+from infrahub.core.branch import Branch
+from infrahub.core.constants.infrahubkind import REPOSITORYVALIDATOR, USERVALIDATOR
+from infrahub.core.convert_object_type.object_conversion import (
+    ConversionFieldInput,
+    convert_object_type,
+    validate_conversion,
+)
+from infrahub.core.manager import NodeManager
+from infrahub.core.node import Node
+from infrahub.core.protocols import CoreReadOnlyRepository, CoreRepository
+from infrahub.core.schema import NodeSchema
+from infrahub.core.timestamp import Timestamp
+from infrahub.database import InfrahubDatabase
+from infrahub.message_bus.messages import RefreshRegistryBranches
+from infrahub.repositories.create_repository import RepositoryFinalizer
+from infrahub.workers.dependencies import get_message_bus
+
+
+async def convert_repository_type(
+    repository: CoreRepository | CoreReadOnlyRepository,
+    target_schema: NodeSchema,
+    mapping: dict[str, ConversionFieldInput],
+    branch: Branch,
+    db: InfrahubDatabase,
+    repository_post_creator: RepositoryFinalizer,
+) -> Node:
+    """Delete the node and return the new created one. If creation fails, the node is not deleted, and raise an error.
+    An extra check is performed on input node peers relationships to make sure they are still valid."""
+
+    repo_name = repository.name.value
+    async with lock.registry.get(name=repo_name, namespace="repository"):
+        async with db.start_transaction() as dbt:
+            timestamp_before_conversion = Timestamp()
+
+            # Fetch validators before deleting the repository otherwise validator-repository would no longer exist
+            user_validators = await NodeManager.query(
+                db=dbt, schema=USERVALIDATOR, prefetch_relationships=True, filters={"repository__id": repository.id}
+            )
+            repository_validators = await NodeManager.query(
+                db=dbt,
+                schema=REPOSITORYVALIDATOR,
+                prefetch_relationships=True,
+                filters={"repository__id": repository.id},
+            )
+            new_repository = await convert_object_type(
+                node=repository,  # type: ignore[arg-type]
+                target_schema=target_schema,
+                mapping=mapping,
+                branch=branch,
+                db=dbt,
+            )
+
+            for user_validator in user_validators:
+                await user_validator.repository.update(db=dbt, data=new_repository)
+                await user_validator.repository.save(db=dbt)
+
+            for repository_validator in repository_validators:
+                await repository_validator.repository.update(db=dbt, data=new_repository)
+                await repository_validator.repository.save(db=dbt)
+
+            await validate_conversion(
+                deleted_node=repository,  # type: ignore[arg-type]
+                branch=branch,
+                db=dbt,
+                timestamp_before_conversion=timestamp_before_conversion,
+            )
+
+        # Refresh outside the transaction otherwise other workers would pull outdated branch objects.
+        message_bus = await get_message_bus()
+        await message_bus.send(RefreshRegistryBranches())
+
+        # Following call involve a potential update of `commit` value of the newly created repository
+        # that would be done from another database connection so it can't be performed within above transaction.
+        # Also note since the conversion can only be performed on main branch here, it is fine that we do it
+        # after having updating other branches status to NEEDS_REBASE.
+        await repository_post_creator.post_create(
+            branch=branch,
+            obj=new_repository,  # type: ignore
+            db=db,
+            delete_on_connectivity_failure=False,
+        )
+
+        # Delete the RepositoryGroup associated with the old repository, as a new one was created for the new repository.
+        repository_groups = (await repository.groups_objects.get_peers(db=db)).values()
+        for repository_group in repository_groups:
+            await NodeManager.delete(db=db, branch=branch, nodes=[repository_group], cascade_delete=False)
+
+    return new_repository
infrahub/core/convert_object_type/schema_mapping.py
CHANGED
@@ -1,6 +1,6 @@
 from pydantic import BaseModel

-from infrahub.core.constants import RelationshipCardinality
+from infrahub.core.constants import BranchSupportType, RelationshipCardinality
 from infrahub.core.schema import NodeSchema


@@ -13,6 +13,21 @@ class SchemaMappingValue(BaseModel):
 SchemaMapping = dict[str, SchemaMappingValue]


+def _are_branch_support_matching(
+    left_branch_support: BranchSupportType | None,
+    right_branch_support: BranchSupportType | None,
+) -> bool:
+    if left_branch_support == right_branch_support:
+        return True
+
+    local_aware = [BranchSupportType.AWARE, BranchSupportType.LOCAL]
+
+    if left_branch_support in local_aware and right_branch_support in local_aware:
+        return True
+
+    return False
+
+
 def get_schema_mapping(source_schema: NodeSchema, target_schema: NodeSchema) -> SchemaMapping:
     """
     Return fields mapping meant to be used for converting a node from `source_kind` to `target_kind`.
@@ -31,7 +46,11 @@ def get_schema_mapping(source_schema: NodeSchema, target_schema: NodeSchema) ->
     # Process attributes
     for target_attr in target_schema.attributes:
         source_attr = source_attrs.get(target_attr.name)
-        if
+        if (
+            source_attr
+            and source_attr.kind == target_attr.kind
+            and _are_branch_support_matching(source_attr.branch, target_attr.branch)
+        ):
             target_field_to_source_field[target_attr.name] = SchemaMappingValue(
                 source_field_name=source_attr.name, is_mandatory=not target_attr.optional
             )
@@ -41,7 +60,12 @@ def get_schema_mapping(source_schema: NodeSchema, target_schema: NodeSchema) ->
     # Process relationships
     for target_rel in target_schema.relationships:
         source_rel = source_rels.get(target_rel.name)
-        if
+        if (
+            source_rel
+            and source_rel.peer == target_rel.peer
+            and source_rel.cardinality == target_rel.cardinality
+            and _are_branch_support_matching(source_rel.branch, target_rel.branch)
+        ):
             target_field_to_source_field[target_rel.name] = SchemaMappingValue(
                 source_field_name=source_rel.name,
                 is_mandatory=not target_rel.optional,
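The new `_are_branch_support_matching` check treats AWARE and LOCAL as compatible while keeping AGNOSTIC fields separate, so attribute and relationship mappings are only generated across compatible branch-support modes. The expected behaviour, spelled out as assertions (import path assumed from the file shown above):

```python
from infrahub.core.constants import BranchSupportType
from infrahub.core.convert_object_type.schema_mapping import _are_branch_support_matching

# Identical modes always match, including two unset values.
assert _are_branch_support_matching(BranchSupportType.AWARE, BranchSupportType.AWARE)
assert _are_branch_support_matching(None, None)

# AWARE and LOCAL are treated as interchangeable.
assert _are_branch_support_matching(BranchSupportType.AWARE, BranchSupportType.LOCAL)
assert _are_branch_support_matching(BranchSupportType.LOCAL, BranchSupportType.AWARE)

# AGNOSTIC only matches AGNOSTIC.
assert not _are_branch_support_matching(BranchSupportType.AGNOSTIC, BranchSupportType.AWARE)
assert not _are_branch_support_matching(BranchSupportType.AGNOSTIC, BranchSupportType.LOCAL)
```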
infrahub/core/diff/model/path.py
CHANGED
@@ -335,6 +335,10 @@ class EnrichedDiffNode(BaseSummary):
     def kind(self) -> str:
         return self.identifier.kind

+    @property
+    def is_schema_node(self) -> bool:
+        return self.identifier.kind.startswith("Schema")
+
     @property
     def num_properties(self) -> int:
         return sum(a.num_properties for a in self.attributes) + sum(r.num_properties for r in self.relationships)
infrahub/core/diff/payload_builder.py
CHANGED
@@ -36,7 +36,7 @@ async def get_display_labels_per_kind(
             break
         node_map = await NodeManager.get_many(ids=limited_ids, fields=fields, db=db, branch=branch)
         for node_id, node in node_map.items():
-            display_label_map[node_id] = await node.
+            display_label_map[node_id] = await node.get_display_label(db=db)
         offset += limit
     return display_label_map

infrahub/core/graph/__init__.py
CHANGED
@@ -1 +1 @@
-GRAPH_VERSION =
+GRAPH_VERSION = 44
infrahub/core/initialization.py
CHANGED
@@ -34,7 +34,7 @@ from infrahub.database import InfrahubDatabase
 from infrahub.database.memgraph import IndexManagerMemgraph
 from infrahub.database.neo4j import IndexManagerNeo4j
 from infrahub.exceptions import DatabaseError
-from infrahub.graphql.manager import
+from infrahub.graphql.manager import registry as graphql_registry
 from infrahub.log import get_logger
 from infrahub.menu.utils import create_default_menu
 from infrahub.permissions import PermissionBackend, get_or_create_global_permission
@@ -50,7 +50,7 @@ async def get_root_node(db: InfrahubDatabase, initialize: bool = False) -> Root:
     roots = await Root.get_list(db=db)
     if len(roots) == 0 and not initialize:
         raise DatabaseError(
-            "The Database hasn't been initialized for Infrahub, please
+            "The Database hasn't been initialized for Infrahub, please 'infrahub server start' to initialize the database."
         )

     if len(roots) == 0:
@@ -137,7 +137,8 @@ async def add_indexes(db: InfrahubDatabase) -> None:
     await index_manager.add()


-async def initialization(db: InfrahubDatabase, add_database_indexes: bool = False) ->
+async def initialization(db: InfrahubDatabase, add_database_indexes: bool = False) -> bool:
+    """Run initialization and setup, returns a boolean to indicate if it's the initial setup."""
     if config.SETTINGS.database.db_type == config.DatabaseType.MEMGRAPH:
         session = await db.session()
         await session.run(query="SET DATABASE SETTING 'log.level' TO 'INFO'")
@@ -148,6 +149,7 @@ async def initialization(db: InfrahubDatabase, add_database_indexes: bool = Fals
     # Initialize the database and Load the Root node
     # ---------------------------------------------------
     async with lock.registry.initialization():
+        first_time_initialization = len(await Root.get_list(db=db)) == 0
         log.debug("Checking Root Node")
         await initialize_registry(db=db, initialize=True)

@@ -196,7 +198,7 @@ async def initialization(db: InfrahubDatabase, add_database_indexes: bool = Fals

     default_branch = registry.get_branch_from_registry(branch=registry.default_branch)
     schema_branch = registry.schema.get_schema_branch(name=default_branch.name)
-    gqlm =
+    gqlm = graphql_registry.get_manager_for_branch(branch=default_branch, schema_branch=schema_branch)
     gqlm.get_graphql_schema(
         include_query=True,
         include_mutation=True,
@@ -210,6 +212,7 @@ async def initialization(db: InfrahubDatabase, add_database_indexes: bool = Fals
     ip_namespace = await get_default_ipnamespace(db=db)
     if ip_namespace:
         registry.default_ipnamespace = ip_namespace.id
+    return first_time_initialization


 async def create_root_node(db: InfrahubDatabase) -> Root:
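With this change, `initialization()` reports whether the run performed the very first setup (no Root node existed beforehand). A hedged sketch of a caller using that return value; the surrounding startup function is hypothetical, only the changed signature comes from the diff.

```python
# Hypothetical startup hook built on the changed signature above.
from infrahub.core.initialization import initialization
from infrahub.database import InfrahubDatabase


async def start_application(db: InfrahubDatabase) -> None:
    first_time_setup = await initialization(db=db, add_database_indexes=True)
    if first_time_setup:
        # Run one-off bootstrap steps only when the database was just created.
        print("Fresh Infrahub database initialized")
```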
|