infrahub-server 1.3.2__py3-none-any.whl → 1.3.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- infrahub/api/schema.py +2 -2
- infrahub/cli/db.py +194 -13
- infrahub/core/branch/enums.py +8 -0
- infrahub/core/branch/models.py +28 -5
- infrahub/core/branch/tasks.py +5 -7
- infrahub/core/convert_object_type/conversion.py +10 -0
- infrahub/core/diff/coordinator.py +32 -34
- infrahub/core/diff/diff_locker.py +26 -0
- infrahub/core/diff/enricher/hierarchy.py +7 -3
- infrahub/core/diff/query_parser.py +7 -3
- infrahub/core/graph/__init__.py +1 -1
- infrahub/core/initialization.py +4 -3
- infrahub/core/merge.py +31 -16
- infrahub/core/migrations/graph/__init__.py +26 -0
- infrahub/core/migrations/graph/m012_convert_account_generic.py +4 -3
- infrahub/core/migrations/graph/m013_convert_git_password_credential.py +4 -3
- infrahub/core/migrations/graph/m032_cleanup_orphaned_branch_relationships.py +105 -0
- infrahub/core/migrations/graph/m033_deduplicate_relationship_vertices.py +97 -0
- infrahub/core/migrations/graph/m034_find_orphaned_schema_fields.py +84 -0
- infrahub/core/migrations/schema/node_attribute_add.py +55 -2
- infrahub/core/migrations/shared.py +37 -9
- infrahub/core/node/__init__.py +44 -21
- infrahub/core/node/resource_manager/ip_address_pool.py +5 -3
- infrahub/core/node/resource_manager/ip_prefix_pool.py +7 -4
- infrahub/core/node/resource_manager/number_pool.py +62 -22
- infrahub/core/node/standard.py +4 -0
- infrahub/core/query/branch.py +25 -56
- infrahub/core/query/node.py +78 -24
- infrahub/core/query/relationship.py +11 -8
- infrahub/core/query/resource_manager.py +117 -20
- infrahub/core/relationship/model.py +10 -5
- infrahub/core/schema/__init__.py +5 -0
- infrahub/core/schema/attribute_parameters.py +6 -0
- infrahub/core/schema/attribute_schema.py +6 -0
- infrahub/core/schema/manager.py +5 -11
- infrahub/core/schema/relationship_schema.py +6 -0
- infrahub/core/schema/schema_branch.py +50 -11
- infrahub/core/validators/node/attribute.py +15 -0
- infrahub/core/validators/tasks.py +12 -4
- infrahub/dependencies/builder/diff/coordinator.py +3 -0
- infrahub/dependencies/builder/diff/locker.py +8 -0
- infrahub/graphql/mutations/main.py +7 -2
- infrahub/graphql/mutations/tasks.py +2 -0
- infrahub/graphql/queries/resource_manager.py +4 -4
- infrahub/tasks/registry.py +63 -35
- infrahub_sdk/client.py +7 -8
- infrahub_sdk/ctl/utils.py +3 -0
- infrahub_sdk/node/node.py +6 -6
- infrahub_sdk/node/relationship.py +43 -2
- infrahub_sdk/yaml.py +13 -7
- infrahub_server-1.3.4.dist-info/LICENSE.txt +201 -0
- {infrahub_server-1.3.2.dist-info → infrahub_server-1.3.4.dist-info}/METADATA +3 -3
- {infrahub_server-1.3.2.dist-info → infrahub_server-1.3.4.dist-info}/RECORD +58 -52
- infrahub_testcontainers/container.py +1 -1
- infrahub_testcontainers/docker-compose-cluster.test.yml +3 -0
- infrahub_testcontainers/docker-compose.test.yml +1 -0
- infrahub_server-1.3.2.dist-info/LICENSE.txt +0 -661
- {infrahub_server-1.3.2.dist-info → infrahub_server-1.3.4.dist-info}/WHEEL +0 -0
- {infrahub_server-1.3.2.dist-info → infrahub_server-1.3.4.dist-info}/entry_points.txt +0 -0
infrahub/core/schema/schema_branch.py
CHANGED

@@ -1588,7 +1588,8 @@ class SchemaBranch:
 
             self.set(name=name, schema=node)
 
-    def generate_weight(self) -> None:
+    def _generate_weight_generics(self) -> None:
+        """Generate order_weight for all generic schemas."""
         for name in self.generic_names:
             node = self.get(name=name, duplicate=False)
 
@@ -1606,6 +1607,8 @@ class SchemaBranch:
 
             self.set(name=name, schema=node)
 
+    def _generate_weight_nodes_profiles(self) -> None:
+        """Generate order_weight for all nodes and profiles."""
         for name in self.node_names + self.profile_names:
             node = self.get(name=name, duplicate=False)
 
@@ -1630,6 +1633,33 @@ class SchemaBranch:
 
             self.set(name=name, schema=node)
 
+    def _generate_weight_templates(self) -> None:
+        """Generate order_weight for all templates.
+
+        The order of the fields for the template must respect the order of the node.
+        """
+        for name in self.template_names:
+            template = self.get(name=name, duplicate=True)
+            node = self.get(name=template.name, duplicate=False)
+
+            node_weights = {
+                item.name: item.order_weight
+                for item in node.attributes + node.relationships
+                if item.order_weight is not None
+            }
+
+            for item in template.attributes + template.relationships:
+                if item.order_weight:
+                    continue
+                item.order_weight = node_weights[item.name] + 10000 if item.name in node_weights else None
+
+            self.set(name=name, schema=template)
+
+    def generate_weight(self) -> None:
+        self._generate_weight_generics()
+        self._generate_weight_nodes_profiles()
+        self._generate_weight_templates()
+
     def cleanup_inherited_elements(self) -> None:
         for name in self.node_names:
             node = self.get_node(name=name, duplicate=False)
@@ -2038,25 +2068,34 @@ class SchemaBranch:
                 if relationship.kind not in [RelationshipKind.ATTRIBUTE, RelationshipKind.GENERIC]
                 else relationship.peer
             )
+
+            is_optional = (
+                relationship.optional if is_autogenerated_subtemplate else relationship.kind != RelationshipKind.PARENT
+            )
+            identifier = (
+                f"template_{relationship.identifier}"
+                if relationship.identifier
+                else self._generate_identifier_string(template_schema.kind, rel_template_peer)
+            )
+            label = (
+                f"{relationship.name} template".title()
+                if relationship.kind in [RelationshipKind.COMPONENT, RelationshipKind.PARENT]
+                else relationship.name.title()
+            )
+
             template_schema.relationships.append(
                 RelationshipSchema(
                     name=relationship.name,
                     peer=rel_template_peer,
                     kind=relationship.kind,
-                    optional=relationship.optional
-                    if is_autogenerated_subtemplate
-                    else relationship.kind != RelationshipKind.PARENT,
+                    optional=is_optional,
                    cardinality=relationship.cardinality,
                    direction=relationship.direction,
                    branch=relationship.branch,
-                    identifier=f"template_{relationship.identifier}"
-                    if relationship.identifier
-                    else self._generate_identifier_string(template_schema.kind, rel_template_peer),
+                    identifier=identifier,
                    min_count=relationship.min_count,
                    max_count=relationship.max_count,
-                    label=f"{relationship.name} template".title()
-                    if relationship.kind in [RelationshipKind.COMPONENT, RelationshipKind.PARENT]
-                    else relationship.name.title(),
+                    label=label,
                    inherited=relationship.inherited,
                )
            )
@@ -2144,7 +2183,7 @@ class SchemaBranch:
             attr_schema_class = get_attribute_schema_class_for_kind(kind=node_attr.kind)
             attr = attr_schema_class(
                 optional=node_attr.optional if is_autogenerated_subtemplate else True,
-                **node_attr.model_dump(exclude=["id", "unique", "optional", "read_only"]),
+                **node_attr.model_dump(exclude=["id", "unique", "optional", "read_only", "order_weight"]),
             )
             template.attributes.append(attr)
 
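Taken together, these `SchemaBranch` hunks split `generate_weight` into three passes and derive template field weights from their node counterparts, offset by 10000 so node-defined ordering is preserved while template fields sort after explicitly weighted node fields. A standalone sketch of the offset logic, using a simplified `Field` dataclass as a stand-in for the real attribute/relationship schema classes:

```python
from dataclasses import dataclass

@dataclass
class Field:
    name: str
    order_weight: int | None = None

# Weights already computed for the node's fields.
node_fields = [Field("name", 1000), Field("description", 2000)]
# Template fields start out unweighted.
template_fields = [Field("name"), Field("description"), Field("extra")]

node_weights = {f.name: f.order_weight for f in node_fields if f.order_weight is not None}

for item in template_fields:
    if item.order_weight:  # keep explicitly assigned weights
        continue
    item.order_weight = node_weights[item.name] + 10000 if item.name in node_weights else None

print([(f.name, f.order_weight) for f in template_fields])
# [('name', 11000), ('description', 12000), ('extra', None)]
```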
infrahub/core/validators/node/attribute.py
CHANGED

@@ -2,6 +2,9 @@ from __future__ import annotations
 
 from typing import TYPE_CHECKING
 
+from infrahub.core import registry
+from infrahub.core.schema.attribute_parameters import NumberPoolParameters
+
 from ..interface import ConstraintCheckerInterface
 from ..query import NodeNotPresentValidatorQuery
 
@@ -31,10 +34,22 @@ class NodeAttributeAddChecker(ConstraintCheckerInterface):
         grouped_data_paths_list: list[GroupedDataPaths] = []
         if not request.schema_path.field_name:
             raise ValueError("field_name is not defined")
+
         attribute_schema = request.node_schema.get_attribute(name=request.schema_path.field_name)
         if attribute_schema.optional is True or attribute_schema.default_value is not None:
             return grouped_data_paths_list
 
+        # If the attribute is a NumberPool, we need to ensure that the pool is big enough for all existing nodes
+        if attribute_schema.kind == "NumberPool" and isinstance(attribute_schema.parameters, NumberPoolParameters):
+            nbr_nodes = await registry.manager.count(db=self.db, branch=self.branch, schema=request.node_schema)
+            pool_size = attribute_schema.parameters.get_pool_size()
+
+            if pool_size < nbr_nodes:
+                raise ValueError(
+                    f"The size of the NumberPool is smaller than the number of existing nodes {pool_size} < {nbr_nodes}."
+                )
+            return grouped_data_paths_list
+
         for query_class in self.query_classes:
             # TODO add exception handling
             query = await query_class.init(
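The new guard in `NodeAttributeAddChecker` rejects a mandatory `NumberPool` attribute whose pool cannot cover the nodes that already exist, since every existing node would need an allocation. A self-contained sketch of the same check, assuming the pool is an inclusive `[start, end]` range (the real `NumberPoolParameters.get_pool_size()` may compute its size differently):

```python
from dataclasses import dataclass

@dataclass
class PoolParameters:
    # Hypothetical inclusive range; stand-in for
    # infrahub.core.schema.attribute_parameters.NumberPoolParameters.
    start_range: int
    end_range: int

    def get_pool_size(self) -> int:
        return self.end_range - self.start_range + 1

params = PoolParameters(start_range=1, end_range=100)
nbr_nodes = 250  # existing nodes that would each need a number

if params.get_pool_size() < nbr_nodes:
    raise ValueError(
        f"The size of the NumberPool is smaller than the number of existing nodes "
        f"{params.get_pool_size()} < {nbr_nodes}."
    )
```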
infrahub/core/validators/tasks.py
CHANGED

@@ -11,9 +11,7 @@ from infrahub.core.branch import Branch  # noqa: TC001
 from infrahub.core.path import SchemaPath  # noqa: TC001
 from infrahub.core.schema import GenericSchema, NodeSchema
 from infrahub.core.validators.aggregated_checker import AggregatedConstraintChecker
-from infrahub.core.validators.model import (
-    SchemaConstraintValidatorRequest,
-)
+from infrahub.core.validators.model import SchemaConstraintValidatorRequest, SchemaViolation
 from infrahub.dependencies.registry import get_component_registry
 from infrahub.services import InfrahubServices  # noqa: TC001 needed for prefect flow
 from infrahub.workflows.utils import add_tags
@@ -84,7 +82,17 @@ async def schema_path_validate(
     aggregated_constraint_checker = await component_registry.get_component(
         AggregatedConstraintChecker, db=db, branch=branch
     )
-    violations = await aggregated_constraint_checker.run_constraints(constraint_request)
+    try:
+        violations = await aggregated_constraint_checker.run_constraints(constraint_request)
+    except Exception as exc:
+        violation = SchemaViolation(
+            node_id="unknown",
+            node_kind=node_schema.kind,
+            display_label=f"Error validating {constraint_name} on {node_schema.kind}",
+            full_display_label=f"Error validating {constraint_name} on {node_schema.kind}",
+            message=str(exc),
+        )
+        violations = [violation]
 
     return SchemaValidatorPathResponseData(
         violations=violations, constraint_name=constraint_name, schema_path=schema_path
infrahub/dependencies/builder/diff/coordinator.py
CHANGED

@@ -1,4 +1,5 @@
 from infrahub.core.diff.coordinator import DiffCoordinator
+from infrahub.dependencies.builder.diff.locker import DiffLockerDependency
 from infrahub.dependencies.interface import DependencyBuilder, DependencyBuilderContext
 
 from .calculator import DiffCalculatorDependency

@@ -15,6 +16,7 @@ class DiffCoordinatorDependency(DependencyBuilder[DiffCoordinator]):
     @classmethod
     def build(cls, context: DependencyBuilderContext) -> DiffCoordinator:
         return DiffCoordinator(
+            db=context.db,
             diff_repo=DiffRepositoryDependency.build(context=context),
             diff_calculator=DiffCalculatorDependency.build(context=context),
             diff_combiner=DiffCombinerDependency.build(context=context),

@@ -23,4 +25,5 @@ class DiffCoordinatorDependency(DependencyBuilder[DiffCoordinator]):
             labels_enricher=DiffLabelsEnricherDependency.build(context=context),
             data_check_synchronizer=DiffDataCheckSynchronizerDependency.build(context=context),
             conflict_transferer=DiffConflictTransfererDependency.build(context=context),
+            diff_locker=DiffLockerDependency.build(context=context),
         )
infrahub/dependencies/builder/diff/locker.py
ADDED

@@ -0,0 +1,8 @@
+from infrahub.core.diff.diff_locker import DiffLocker
+from infrahub.dependencies.interface import DependencyBuilder, DependencyBuilderContext
+
+
+class DiffLockerDependency(DependencyBuilder[DiffLocker]):
+    @classmethod
+    def build(cls, context: DependencyBuilderContext) -> DiffLocker:  # noqa: ARG003
+        return DiffLocker()
infrahub/graphql/mutations/main.py
CHANGED

@@ -376,8 +376,13 @@ class InfrahubMutationMixin:
             return updated_obj, mutation, False
 
         try:
-            data.pop("hfid", None)
-            created_obj, mutation = await cls.mutate_create(info=info, data=data, branch=branch)
+            # This is a hack to avoid situations where a node has an attribute or relationship called "pop"
+            # which would have overridden the `pop` method of the InputObjectType object and as such would have
+            # caused an error when trying to call `data.pop("hfid", None)`.
+            # TypeError: 'NoneType' object is not callable
+            data._pop = dict.pop.__get__(data, dict)
+            data._pop("hfid", None)  # `hfid` is invalid for creation.
+            created_obj, mutation = await cls.mutate_create(info=info, data=data, branch=branch)
             return created_obj, mutation, True
         except HFIDViolatedError as exc:
             # Only the HFID constraint has been violated, it means the node exists and we can update without rerunning constraints
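The `_pop` workaround relies on binding the built-in `dict.pop` descriptor directly to the instance, which bypasses any same-named field that shadows the method. A minimal standalone reproduction of the failure mode and the fix, with a plain `dict` subclass standing in for graphene's `InputObjectType`:

```python
class InputData(dict):
    """Stand-in for an input object that also exposes its fields as attributes."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        for key, value in kwargs.items():
            setattr(self, key, value)  # a field named "pop" now shadows dict.pop

data = InputData(name="router-1", pop=None, hfid=["router-1"])

try:
    data.pop("hfid", None)  # attribute lookup finds the field, not the method
except TypeError as exc:
    print(exc)  # 'NoneType' object is not callable

# Bind the real dict.pop to this instance; instance attributes cannot shadow it.
data._pop = dict.pop.__get__(data, dict)
data._pop("hfid", None)
print("hfid" in data)  # False
```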
infrahub/graphql/mutations/tasks.py
CHANGED

@@ -6,6 +6,7 @@ from infrahub.context import InfrahubContext  # noqa: TC001 needed for prefect
 from infrahub.core import registry
 from infrahub.core.branch import Branch
 from infrahub.core.diff.coordinator import DiffCoordinator
+from infrahub.core.diff.diff_locker import DiffLocker
 from infrahub.core.diff.merger.merger import DiffMerger
 from infrahub.core.diff.repository.repository import DiffRepository
 from infrahub.core.merge import BranchMerger
@@ -50,6 +51,7 @@ async def merge_branch_mutation(branch: str, context: InfrahubContext, service:
         diff_merger=diff_merger,
         diff_repository=diff_repository,
         source_branch=obj,
+        diff_locker=DiffLocker(),
         service=service,
     )
     candidate_schema = merger.get_candidate_schema()
infrahub/graphql/queries/resource_manager.py
CHANGED

@@ -2,7 +2,7 @@ from __future__ import annotations
 
 from typing import TYPE_CHECKING, Any
 
-from graphene import Field, Float, Int, List, NonNull, ObjectType, String
+from graphene import BigInt, Field, Float, Int, List, NonNull, ObjectType, String
 from infrahub_sdk.utils import extract_fields_first_node
 
 from infrahub.core import registry
@@ -33,7 +33,7 @@ class IPPoolUtilizationResource(ObjectType):
     id = Field(String, required=True, description="The ID of the current resource")
     display_label = Field(String, required=True, description="The common name of the resource")
     kind = Field(String, required=True, description="The resource kind")
-    weight = Field(Int, required=True, description="The relative weight of this resource.")
+    weight = Field(BigInt, required=True, description="The relative weight of this resource.")
     utilization = Field(Float, required=True, description="The overall utilization of the resource.")
     utilization_branches = Field(
         Float, required=True, description="The utilization of the resource on all non default branches."
@@ -70,7 +70,7 @@ def _validate_pool_type(pool_id: str, pool: CoreNode | None = None) -> CoreNode:
 
 
 class PoolAllocated(ObjectType):
-    count = Field(Int, required=True, description="The number of allocations within the selected pool.")
+    count = Field(BigInt, required=True, description="The number of allocations within the selected pool.")
     edges = Field(List(of_type=NonNull(PoolAllocatedEdge), required=True), required=True)
 
     @staticmethod
@@ -174,7 +174,7 @@ class PoolAllocated(ObjectType):
 
 
 class PoolUtilization(ObjectType):
-    count = Field(Int, required=True, description="The number of resources within the selected pool.")
+    count = Field(BigInt, required=True, description="The number of resources within the selected pool.")
     utilization = Field(Float, required=True, description="The overall utilization of the pool.")
     utilization_branches = Field(Float, required=True, description="The utilization in all non default branches.")
     utilization_default_branch = Field(
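The switch from `Int` to `BigInt` matters because GraphQL's built-in `Int` is a signed 32-bit scalar: any count or weight above 2,147,483,647 fails serialization. A quick sketch of the boundary, assuming graphene 3:

```python
import graphene

class Query(graphene.ObjectType):
    small = graphene.Int()
    big = graphene.BigInt()

    def resolve_small(root, info):
        return 2**31  # one past the Int maximum of 2**31 - 1

    def resolve_big(root, info):
        return 2**31

schema = graphene.Schema(query=Query)
result = schema.execute("{ small big }")
print(result.errors)  # Int cannot represent non 32-bit signed integer value: 2147483648
print(result.data)    # {'small': None, 'big': 2147483648}
```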
infrahub/tasks/registry.py
CHANGED
@@ -1,17 +1,77 @@
+from __future__ import annotations
+
 from typing import TYPE_CHECKING
 
 from infrahub import lock
 from infrahub.core import registry
-from infrahub.database import InfrahubDatabase
 from infrahub.log import get_logger
 from infrahub.worker import WORKER_IDENTITY
 
 if TYPE_CHECKING:
     from infrahub.core.branch import Branch
+    from infrahub.core.schema.schema_branch import SchemaBranch
+    from infrahub.database import InfrahubDatabase
 
 log = get_logger()
 
 
+def update_graphql_schema(branch: Branch, schema_branch: SchemaBranch) -> None:
+    """
+    Update the GraphQL schema for the given branch.
+    """
+    from infrahub.graphql.manager import GraphQLSchemaManager
+
+    gqlm = GraphQLSchemaManager.get_manager_for_branch(branch=branch, schema_branch=schema_branch)
+    gqlm.get_graphql_schema(
+        include_query=True,
+        include_mutation=True,
+        include_subscription=True,
+        include_types=True,
+    )
+
+
+async def create_branch_registry(db: InfrahubDatabase, branch: Branch) -> None:
+    """Create a new entry in the registry for a given branch."""
+
+    log.info("New branch detected, pulling schema", branch=branch.name, worker=WORKER_IDENTITY)
+    await registry.schema.load_schema(db=db, branch=branch)
+    registry.branch[branch.name] = branch
+    schema_branch = registry.schema.get_schema_branch(name=branch.name)
+    update_graphql_schema(branch=branch, schema_branch=schema_branch)
+
+
+async def update_branch_registry(db: InfrahubDatabase, branch: Branch) -> None:
+    """Update the registry for a branch if the schema hash has changed."""
+
+    existing_branch: Branch = registry.branch[branch.name]
+
+    if not existing_branch.schema_hash:
+        log.warning("Branch schema hash is not set, cannot update branch registry")
+        return
+
+    if existing_branch.schema_hash and existing_branch.schema_hash.main == branch.active_schema_hash.main:
+        log.debug(
+            "Branch schema hash is the same, no need to update branch registry",
+            branch=branch.name,
+            hash=existing_branch.schema_hash.main,
+            worker=WORKER_IDENTITY,
+        )
+        return
+
+    log.info(
+        "New hash detected",
+        branch=branch.name,
+        hash_current=existing_branch.schema_hash.main,
+        hash_new=branch.active_schema_hash.main,
+        worker=WORKER_IDENTITY,
+    )
+    await registry.schema.load_schema(db=db, branch=branch)
+    registry.branch[branch.name] = branch
+    schema_branch = registry.schema.get_schema_branch(name=branch.name)
+
+    update_graphql_schema(branch=branch, schema_branch=schema_branch)
+
+
 async def refresh_branches(db: InfrahubDatabase) -> None:
     """Pull all the branches from the database and update the registry.
 
@@ -24,41 +84,9 @@ async def refresh_branches(db: InfrahubDatabase) -> None:
     branches = await registry.branch_object.get_list(db=db)
     for new_branch in branches:
         if new_branch.name in registry.branch:
-            branch_registry = registry.branch[new_branch.name]
-            if (
-                branch_registry.schema_hash
-                and branch_registry.schema_hash.main != new_branch.active_schema_hash.main
-            ):
-                log.info(
-                    "New hash detected",
-                    branch=new_branch.name,
-                    hash_current=branch_registry.schema_hash.main,
-                    hash_new=new_branch.active_schema_hash.main,
-                    worker=WORKER_IDENTITY,
-                )
-                await registry.schema.load_schema(db=db, branch=new_branch)
-                registry.branch[new_branch.name] = new_branch
-                schema_branch = registry.schema.get_schema_branch(name=new_branch.name)
-                gqlm = GraphQLSchemaManager.get_manager_for_branch(branch=new_branch, schema_branch=schema_branch)
-                gqlm.get_graphql_schema(
-                    include_query=True,
-                    include_mutation=True,
-                    include_subscription=True,
-                    include_types=True,
-                )
-
+            await update_branch_registry(db=db, branch=new_branch)
         else:
-
-            await registry.schema.load_schema(db=db, branch=new_branch)
-            registry.branch[new_branch.name] = new_branch
-            schema_branch = registry.schema.get_schema_branch(name=new_branch.name)
-            gqlm = GraphQLSchemaManager.get_manager_for_branch(branch=new_branch, schema_branch=schema_branch)
-            gqlm.get_graphql_schema(
-                include_query=True,
-                include_mutation=True,
-                include_subscription=True,
-                include_types=True,
-            )
+            await create_branch_registry(db=db, branch=new_branch)
 
     purged_branches = await registry.purge_inactive_branches(db=db, active_branches=branches)
     purged_branches.update(
infrahub_sdk/client.py
CHANGED
@@ -784,7 +784,6 @@ class InfrahubClient(BaseClient):
         if at:
             at = Timestamp(at)
 
-        node = InfrahubNode(client=self, schema=schema, branch=branch)
         filters = kwargs
         pagination_size = self.pagination_size
 
@@ -825,12 +824,12 @@ class InfrahubClient(BaseClient):
         nodes = []
         related_nodes = []
         batch_process = await self.create_batch()
-        count = await self.count(kind=schema.kind, partial_match=partial_match, **filters)
+        count = await self.count(kind=schema.kind, branch=branch, partial_match=partial_match, **filters)
         total_pages = (count + pagination_size - 1) // pagination_size
 
         for page_number in range(1, total_pages + 1):
             page_offset = (page_number - 1) * pagination_size
-            batch_process.add(task=process_page,
+            batch_process.add(task=process_page, page_offset=page_offset, page_number=page_number)
 
         async for _, response in batch_process.execute():
             nodes.extend(response[1]["nodes"])
@@ -847,7 +846,7 @@ class InfrahubClient(BaseClient):
 
         while has_remaining_items:
             page_offset = (page_number - 1) * pagination_size
-            response, process_result = await process_page(page_offset, page_number)
+            response, process_result = await process_page(page_offset=page_offset, page_number=page_number)
 
             nodes.extend(process_result["nodes"])
             related_nodes.extend(process_result["related_nodes"])
@@ -1946,9 +1945,9 @@ class InfrahubClientSync(BaseClient):
         """
         branch = branch or self.default_branch
         schema = self.schema.get(kind=kind, branch=branch)
-        node = InfrahubNodeSync(client=self, schema=schema, branch=branch)
         if at:
             at = Timestamp(at)
+
         filters = kwargs
         pagination_size = self.pagination_size
 
@@ -1990,12 +1989,12 @@ class InfrahubClientSync(BaseClient):
         related_nodes = []
         batch_process = self.create_batch()
 
-        count = self.count(kind=schema.kind, partial_match=partial_match, **filters)
+        count = self.count(kind=schema.kind, branch=branch, partial_match=partial_match, **filters)
         total_pages = (count + pagination_size - 1) // pagination_size
 
         for page_number in range(1, total_pages + 1):
             page_offset = (page_number - 1) * pagination_size
-            batch_process.add(task=process_page,
+            batch_process.add(task=process_page, page_offset=page_offset, page_number=page_number)
 
         for _, response in batch_process.execute():
             nodes.extend(response[1]["nodes"])
@@ -2012,7 +2011,7 @@ class InfrahubClientSync(BaseClient):
 
         while has_remaining_items:
             page_offset = (page_number - 1) * pagination_size
-            response, process_result = process_page(page_offset, page_number)
+            response, process_result = process_page(page_offset=page_offset, page_number=page_number)
 
             nodes.extend(process_result["nodes"])
             related_nodes.extend(process_result["related_nodes"])
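In both the async and sync paths, `total_pages = (count + pagination_size - 1) // pagination_size` is integer ceiling division: it rounds up so a final partial page is still fetched. For example:

```python
pagination_size = 50
for count in (0, 49, 50, 51, 120):
    total_pages = (count + pagination_size - 1) // pagination_size
    print(count, "->", total_pages)
# 0 -> 0, 49 -> 1, 50 -> 1, 51 -> 2, 120 -> 3
```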
infrahub_sdk/ctl/utils.py
CHANGED
@@ -187,6 +187,9 @@ def load_yamlfile_from_disk_and_exit(
     has_error = False
     try:
         data_files = file_type.load_from_disk(paths=paths)
+        if not data_files:
+            console.print("[red]No valid files found to load.")
+            raise typer.Exit(1)
     except FileNotValidError as exc:
         console.print(f"[red]{exc.message}")
         raise typer.Exit(1) from exc
infrahub_sdk/node/node.py
CHANGED
@@ -402,10 +402,10 @@ class InfrahubNodeBase:
         if order:
             data["@filters"]["order"] = order
 
-        if offset:
+        if offset is not None:
             data["@filters"]["offset"] = offset
 
-        if limit:
+        if limit is not None:
             data["@filters"]["limit"] = limit
 
         if include and exclude:
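The move from truthiness to an explicit `None` check matters because `0` is falsy: under the old test, a caller passing `offset=0` silently got no offset filter at all. A minimal illustration of the difference (the helper below is hypothetical, not SDK code):

```python
def build_filters(offset=None, limit=None, strict=True):
    filters = {}
    present = (lambda v: v is not None) if strict else bool
    if present(offset):
        filters["offset"] = offset
    if present(limit):
        filters["limit"] = limit
    return filters

print(build_filters(offset=0, limit=10, strict=False))  # {'limit': 10} - offset dropped
print(build_filters(offset=0, limit=10, strict=True))   # {'offset': 0, 'limit': 10}
```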
@@ -1493,15 +1493,15 @@ class InfrahubNodeSync(InfrahubNodeBase):
         for rel_name in self._relationships:
             rel = getattr(self, rel_name)
             if rel and isinstance(rel, RelatedNodeSync):
-                relation = node_data["node"].get(rel_name)
-                if relation.get("node", None):
+                relation = node_data["node"].get(rel_name, None)
+                if relation and relation.get("node", None):
                     related_node = InfrahubNodeSync.from_graphql(
                         client=self._client, branch=branch, data=relation, timeout=timeout
                     )
                     related_nodes.append(related_node)
             elif rel and isinstance(rel, RelationshipManagerSync):
-                peers = node_data["node"].get(rel_name)
-                if peers:
+                peers = node_data["node"].get(rel_name, None)
+                if peers and peers["edges"]:
                     for peer in peers["edges"]:
                         related_node = InfrahubNodeSync.from_graphql(
                             client=self._client, branch=branch, data=peer, timeout=timeout
infrahub_sdk/node/relationship.py
CHANGED

@@ -1,11 +1,15 @@
 from __future__ import annotations
 
+from collections import defaultdict
 from collections.abc import Iterable
 from typing import TYPE_CHECKING, Any
 
+from ..batch import InfrahubBatch
 from ..exceptions import (
+    Error,
     UninitializedError,
 )
+from ..types import Order
 from .constants import PROPERTIES_FLAG, PROPERTIES_OBJECT
 from .related_node import RelatedNode, RelatedNodeSync
 
@@ -156,8 +160,26 @@ class RelationshipManager(RelationshipManagerBase):
         self.peers = rm.peers
         self.initialized = True
 
+        ids_per_kind_map = defaultdict(list)
         for peer in self.peers:
-            await peer.fetch()
+            if not peer.id or not peer.typename:
+                raise Error("Unable to fetch the peer, id and/or typename are not defined")
+            ids_per_kind_map[peer.typename].append(peer.id)
+
+        batch = InfrahubBatch(max_concurrent_execution=self.client.max_concurrent_execution)
+        for kind, ids in ids_per_kind_map.items():
+            batch.add(
+                task=self.client.filters,
+                kind=kind,
+                ids=ids,
+                populate_store=True,
+                branch=self.branch,
+                parallel=True,
+                order=Order(disable=True),
+            )
+
+        async for _ in batch.execute():
+            pass
 
     def add(self, data: str | RelatedNode | dict) -> None:
         """Add a new peer to this relationship."""
@@ -261,8 +283,27 @@ class RelationshipManagerSync(RelationshipManagerBase):
         self.peers = rm.peers
         self.initialized = True
 
+        ids_per_kind_map = defaultdict(list)
         for peer in self.peers:
-            peer.fetch()
+            if not peer.id or not peer.typename:
+                raise Error("Unable to fetch the peer, id and/or typename are not defined")
+            ids_per_kind_map[peer.typename].append(peer.id)
+
+        # Unlike Async, no need to create a new batch from scratch because we are not using a semaphore
+        batch = self.client.create_batch()
+        for kind, ids in ids_per_kind_map.items():
+            batch.add(
+                task=self.client.filters,
+                kind=kind,
+                ids=ids,
+                populate_store=True,
+                branch=self.branch,
+                parallel=True,
+                order=Order(disable=True),
+            )
+
+        for _ in batch.execute():
+            pass
 
     def add(self, data: str | RelatedNodeSync | dict) -> None:
         """Add a new peer to this relationship."""
infrahub_sdk/yaml.py
CHANGED
@@ -120,16 +120,22 @@ class YamlFile(LocalFile):
     @classmethod
     def load_from_disk(cls, paths: list[Path]) -> list[Self]:
         yaml_files: list[Self] = []
+        file_extensions = {".yaml", ".yml", ".json"}  # FIXME: .json is not a YAML file, should be removed
+
         for file_path in paths:
-            if file_path.is_file() and file_path.suffix in [".yaml", ".yml", ".json"]:
-                yaml_files.extend(cls.load_file_from_disk(path=file_path))
+            if not file_path.exists():
+                # Check if the provided path exists, relevant for the first call coming from the user
+                raise FileNotValidError(name=str(file_path), message=f"{file_path} does not exist!")
+            if file_path.is_file():
+                if file_path.suffix in file_extensions:
+                    yaml_files.extend(cls.load_file_from_disk(path=file_path))
+                # else: silently skip files with unrelevant extensions (e.g. .md, .py...)
             elif file_path.is_dir():
+                # Introduce recursion to handle sub-folders
                 sub_paths = [Path(sub_file_path) for sub_file_path in file_path.glob("*")]
-
-
-
-            else:
-                raise FileNotValidError(name=str(file_path), message=f"{file_path} does not exist!")
+                sub_paths = sorted(sub_paths, key=lambda p: p.name)
+                yaml_files.extend(cls.load_from_disk(paths=sub_paths))
+            # else: skip non-file, non-dir (e.g., symlink...)
 
         return yaml_files
 
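The reworked loader raises only for paths that do not exist and now recurses into sub-directories in name-sorted order. A rough standalone equivalent of the traversal, without the YAML parsing:

```python
from pathlib import Path

EXTENSIONS = {".yaml", ".yml"}

def collect_files(paths: list[Path]) -> list[Path]:
    """Recursively collect files with known extensions, skipping everything else."""
    found: list[Path] = []
    for path in paths:
        if not path.exists():
            # Mirrors the FileNotValidError raised by the SDK loader.
            raise FileNotFoundError(f"{path} does not exist!")
        if path.is_file():
            if path.suffix in EXTENSIONS:
                found.append(path)
        elif path.is_dir():
            # Recurse in deterministic, name-sorted order.
            sub_paths = sorted(path.glob("*"), key=lambda p: p.name)
            found.extend(collect_files(sub_paths))
    return found
```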