infrahub-server 1.3.3__py3-none-any.whl → 1.3.5__py3-none-any.whl
This diff covers the changes between two publicly released versions of the package, as published to a supported public registry, and is provided for informational purposes only.
- infrahub/api/schema.py +2 -2
- infrahub/cli/db.py +34 -0
- infrahub/core/convert_object_type/conversion.py +10 -0
- infrahub/core/diff/enricher/hierarchy.py +7 -3
- infrahub/core/diff/query_parser.py +7 -3
- infrahub/core/graph/__init__.py +1 -1
- infrahub/core/migrations/graph/__init__.py +2 -0
- infrahub/core/migrations/graph/m034_find_orphaned_schema_fields.py +84 -0
- infrahub/core/migrations/query/node_duplicate.py +5 -1
- infrahub/core/migrations/schema/node_attribute_add.py +55 -2
- infrahub/core/migrations/shared.py +37 -9
- infrahub/core/node/__init__.py +41 -21
- infrahub/core/node/resource_manager/number_pool.py +60 -22
- infrahub/core/query/diff.py +17 -3
- infrahub/core/query/relationship.py +8 -11
- infrahub/core/query/resource_manager.py +117 -20
- infrahub/core/schema/__init__.py +5 -0
- infrahub/core/schema/attribute_parameters.py +6 -0
- infrahub/core/schema/attribute_schema.py +6 -0
- infrahub/core/schema/manager.py +5 -11
- infrahub/core/schema/relationship_schema.py +6 -0
- infrahub/core/schema/schema_branch.py +72 -11
- infrahub/core/validators/node/attribute.py +15 -0
- infrahub/core/validators/tasks.py +12 -4
- infrahub/generators/tasks.py +1 -1
- infrahub/git/integrator.py +1 -1
- infrahub/git/tasks.py +2 -2
- infrahub/graphql/mutations/main.py +24 -5
- infrahub/graphql/queries/resource_manager.py +4 -4
- infrahub/proposed_change/tasks.py +2 -2
- infrahub/tasks/registry.py +63 -35
- infrahub_sdk/client.py +7 -8
- infrahub_sdk/ctl/utils.py +3 -0
- infrahub_sdk/node/node.py +87 -15
- infrahub_sdk/node/relationship.py +43 -2
- infrahub_sdk/protocols_base.py +0 -2
- infrahub_sdk/protocols_generator/constants.py +1 -0
- infrahub_sdk/utils.py +0 -17
- infrahub_sdk/yaml.py +13 -7
- infrahub_server-1.3.5.dist-info/LICENSE.txt +201 -0
- {infrahub_server-1.3.3.dist-info → infrahub_server-1.3.5.dist-info}/METADATA +3 -3
- {infrahub_server-1.3.3.dist-info → infrahub_server-1.3.5.dist-info}/RECORD +44 -43
- infrahub_server-1.3.3.dist-info/LICENSE.txt +0 -661
- {infrahub_server-1.3.3.dist-info → infrahub_server-1.3.5.dist-info}/WHEEL +0 -0
- {infrahub_server-1.3.3.dist-info → infrahub_server-1.3.5.dist-info}/entry_points.txt +0 -0
infrahub/graphql/mutations/main.py
CHANGED

@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+import hashlib
 from dataclasses import dataclass, field
 from typing import TYPE_CHECKING, Any
 
@@ -152,7 +153,7 @@ class InfrahubMutationMixin:
         """
         schema_branch = db.schema.get_schema_branch(name=branch.name)
         lock_names = _get_kind_lock_names_on_object_mutation(
-            kind=cls._meta.active_schema.kind, branch=branch, schema_branch=schema_branch
+            kind=cls._meta.active_schema.kind, branch=branch, schema_branch=schema_branch, data=data
         )
         if lock_names:
             async with InfrahubMultiLock(lock_registry=lock.registry, locks=lock_names):
@@ -216,7 +217,7 @@ class InfrahubMutationMixin:
 
         schema_branch = db.schema.get_schema_branch(name=branch.name)
         lock_names = _get_kind_lock_names_on_object_mutation(
-            kind=cls._meta.active_schema.kind, branch=branch, schema_branch=schema_branch
+            kind=cls._meta.active_schema.kind, branch=branch, schema_branch=schema_branch, data=data
        )
 
         if db.is_transaction:
@@ -266,7 +267,6 @@ class InfrahubMutationMixin:
         obj = node or await NodeManager.find_object(
             db=db, kind=cls._meta.active_schema.kind, id=data.get("id"), hfid=data.get("hfid"), branch=branch
         )
-
         obj, result = await cls._call_mutate_update(info=info, data=data, db=db, branch=branch, obj=obj)
 
         return obj, result
@@ -517,15 +517,34 @@ def _should_kind_be_locked_on_any_branch(kind: str, schema_branch: SchemaBranch)
     return False
 
 
-def _get_kind_lock_names_on_object_mutation(kind: str, branch: Branch, schema_branch: SchemaBranch) -> list[str]:
+def _hash(value: str) -> str:
+    # Do not use builtin `hash` for lock names as due to randomization results would differ between
+    # different processes.
+    return hashlib.sha256(value.encode()).hexdigest()
+
+
+def _get_kind_lock_names_on_object_mutation(
+    kind: str, branch: Branch, schema_branch: SchemaBranch, data: InputObjectType
+) -> list[str]:
     """
     Return objects kind for which we want to avoid concurrent mutation (create/update). Except for some specific kinds,
     concurrent mutations are only allowed on non-main branch as objects validations will be performed at least when merging in main branch.
     """
 
-    if not branch.is_default and not _should_kind_be_locked_on_any_branch(kind, schema_branch):
+    if not branch.is_default and not _should_kind_be_locked_on_any_branch(kind=kind, schema_branch=schema_branch):
         return []
 
+    if kind == InfrahubKind.GRAPHQLQUERYGROUP:
+        # Lock on name as well to improve performances
+        try:
+            name = data.name.value
+            return [build_object_lock_name(kind + "." + _hash(name))]
+        except AttributeError:
+            # We might reach here if we are updating a CoreGraphQLQueryGroup without updating the name,
+            # in which case we would not need to lock. This is not supposed to happen as current `update`
+            # logic first fetches the node with its name.
+            return []
+
     lock_kinds = _get_kinds_to_lock_on_object_mutation(kind, schema_branch)
     lock_names = [build_object_lock_name(kind) for kind in lock_kinds]
     return lock_names
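
The `_hash` helper exists because Python salts `str` hashing per process (hash randomization via PYTHONHASHSEED), so lock names derived from the builtin `hash()` would differ between API workers and never actually contend. A minimal standalone sketch of the idea; only the `hashlib.sha256` usage mirrors the diff, the rest is illustrative:

import hashlib

def stable_lock_suffix(value: str) -> str:
    # sha256 is deterministic across interpreters; builtin hash() of a str is
    # salted per process, so two workers computing hash("backbone") would
    # derive different lock names for the same object.
    return hashlib.sha256(value.encode()).hexdigest()

# Every worker computing this gets the same lock name, as locking requires:
print("CoreGraphQLQueryGroup." + stable_lock_suffix("backbone"))
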
infrahub/graphql/queries/resource_manager.py
CHANGED

@@ -2,7 +2,7 @@ from __future__ import annotations
 
 from typing import TYPE_CHECKING, Any
 
-from graphene import Field, Float, Int, List, NonNull, ObjectType, String
+from graphene import BigInt, Field, Float, Int, List, NonNull, ObjectType, String
 from infrahub_sdk.utils import extract_fields_first_node
 
 from infrahub.core import registry
@@ -33,7 +33,7 @@ class IPPoolUtilizationResource(ObjectType)
     id = Field(String, required=True, description="The ID of the current resource")
     display_label = Field(String, required=True, description="The common name of the resource")
     kind = Field(String, required=True, description="The resource kind")
-    weight = Field(Int, required=True, description="The relative weight of this resource.")
+    weight = Field(BigInt, required=True, description="The relative weight of this resource.")
     utilization = Field(Float, required=True, description="The overall utilization of the resource.")
     utilization_branches = Field(
         Float, required=True, description="The utilization of the resource on all non default branches."
@@ -70,7 +70,7 @@ def _validate_pool_type(pool_id: str, pool: CoreNode | None = None) -> CoreNode:
 
 
 class PoolAllocated(ObjectType):
-    count = Field(Int, required=True, description="The number of allocations within the selected pool.")
+    count = Field(BigInt, required=True, description="The number of allocations within the selected pool.")
     edges = Field(List(of_type=NonNull(PoolAllocatedEdge), required=True), required=True)
 
     @staticmethod
@@ -174,7 +174,7 @@ class PoolAllocated(ObjectType)
 
 
 class PoolUtilization(ObjectType):
-    count = Field(Int, required=True, description="The number of resources within the selected pool.")
+    count = Field(BigInt, required=True, description="The number of resources within the selected pool.")
     utilization = Field(Float, required=True, description="The overall utilization of the pool.")
     utilization_branches = Field(Float, required=True, description="The utilization in all non default branches.")
     utilization_default_branch = Field(
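
All of the `Int` to `BigInt` swaps above address the same limit: the GraphQL `Int` scalar is a signed 32-bit integer, so counts or weights above 2**31 - 1 (easy to reach with IPv6 pools) fail to serialize. A minimal graphene sketch, assuming graphene 3 and independent of the Infrahub types above:

from graphene import BigInt, Field, ObjectType, Schema

class Query(ObjectType):
    # An IPv6 prefix pool can hold far more than 2**31 - 1 resources;
    # graphene's Int scalar would reject this value at serialization time.
    count = Field(BigInt, required=True)

    def resolve_count(root, info):
        return 2**40

result = Schema(query=Query).execute("{ count }")
print(result.data)  # {'count': 1099511627776}
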
infrahub/proposed_change/tasks.py
CHANGED

@@ -652,7 +652,7 @@ async def validate_artifacts_generation(
             repository_kind=repository.kind,
             branch_name=model.source_branch,
             query=model.artifact_definition.query_name,
-            variables=member.extract(params=artifact_definition.parameters.value),
+            variables=await member.extract(params=artifact_definition.parameters.value),
             target_id=member.id,
             target_kind=member.get_kind(),
             target_name=member.display_label,
@@ -913,7 +913,7 @@ async def request_generator_definition_check(
             repository_kind=repository.kind,
             branch_name=model.source_branch,
             query=model.generator_definition.query_name,
-            variables=member.extract(params=model.generator_definition.parameters),
+            variables=await member.extract(params=model.generator_definition.parameters),
             target_id=member.id,
             target_name=member.display_label,
             validator_id=validator.id,
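
Both call sites only gain an `await`: `extract` moved from a synchronous helper onto the SDK node as a coroutine (see infrahub_sdk/node/node.py below), because resolving a flat path that crosses a relationship now fetches the peer. A short sketch with hypothetical parameter paths:

async def build_variables(member) -> dict:
    # Values are flat paths resolved against the member node; a path such as
    # "site__name__value" triggers a network fetch, hence the await.
    # The paths shown here are illustrative, not taken from the diff.
    return await member.extract(params={"device": "name__value", "site": "site__name__value"})
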
infrahub/tasks/registry.py
CHANGED

@@ -1,17 +1,77 @@
+from __future__ import annotations
+
 from typing import TYPE_CHECKING
 
 from infrahub import lock
 from infrahub.core import registry
-from infrahub.database import InfrahubDatabase
 from infrahub.log import get_logger
 from infrahub.worker import WORKER_IDENTITY
 
 if TYPE_CHECKING:
     from infrahub.core.branch import Branch
+    from infrahub.core.schema.schema_branch import SchemaBranch
+    from infrahub.database import InfrahubDatabase
 
 log = get_logger()
 
 
+def update_graphql_schema(branch: Branch, schema_branch: SchemaBranch) -> None:
+    """
+    Update the GraphQL schema for the given branch.
+    """
+    from infrahub.graphql.manager import GraphQLSchemaManager
+
+    gqlm = GraphQLSchemaManager.get_manager_for_branch(branch=branch, schema_branch=schema_branch)
+    gqlm.get_graphql_schema(
+        include_query=True,
+        include_mutation=True,
+        include_subscription=True,
+        include_types=True,
+    )
+
+
+async def create_branch_registry(db: InfrahubDatabase, branch: Branch) -> None:
+    """Create a new entry in the registry for a given branch."""
+
+    log.info("New branch detected, pulling schema", branch=branch.name, worker=WORKER_IDENTITY)
+    await registry.schema.load_schema(db=db, branch=branch)
+    registry.branch[branch.name] = branch
+    schema_branch = registry.schema.get_schema_branch(name=branch.name)
+    update_graphql_schema(branch=branch, schema_branch=schema_branch)
+
+
+async def update_branch_registry(db: InfrahubDatabase, branch: Branch) -> None:
+    """Update the registry for a branch if the schema hash has changed."""
+
+    existing_branch: Branch = registry.branch[branch.name]
+
+    if not existing_branch.schema_hash:
+        log.warning("Branch schema hash is not set, cannot update branch registry")
+        return
+
+    if existing_branch.schema_hash and existing_branch.schema_hash.main == branch.active_schema_hash.main:
+        log.debug(
+            "Branch schema hash is the same, no need to update branch registry",
+            branch=branch.name,
+            hash=existing_branch.schema_hash.main,
+            worker=WORKER_IDENTITY,
+        )
+        return
+
+    log.info(
+        "New hash detected",
+        branch=branch.name,
+        hash_current=existing_branch.schema_hash.main,
+        hash_new=branch.active_schema_hash.main,
+        worker=WORKER_IDENTITY,
+    )
+    await registry.schema.load_schema(db=db, branch=branch)
+    registry.branch[branch.name] = branch
+    schema_branch = registry.schema.get_schema_branch(name=branch.name)
+
+    update_graphql_schema(branch=branch, schema_branch=schema_branch)
+
+
 async def refresh_branches(db: InfrahubDatabase) -> None:
     """Pull all the branches from the database and update the registry.
 
@@ -24,41 +84,9 @@ async def refresh_branches(db: InfrahubDatabase) -> None:
     branches = await registry.branch_object.get_list(db=db)
     for new_branch in branches:
         if new_branch.name in registry.branch:
-
-            if (
-                branch_registry.schema_hash
-                and branch_registry.schema_hash.main != new_branch.active_schema_hash.main
-            ):
-                log.info(
-                    "New hash detected",
-                    branch=new_branch.name,
-                    hash_current=branch_registry.schema_hash.main,
-                    hash_new=new_branch.active_schema_hash.main,
-                    worker=WORKER_IDENTITY,
-                )
-                await registry.schema.load_schema(db=db, branch=new_branch)
-                registry.branch[new_branch.name] = new_branch
-                schema_branch = registry.schema.get_schema_branch(name=new_branch.name)
-                gqlm = GraphQLSchemaManager.get_manager_for_branch(branch=new_branch, schema_branch=schema_branch)
-                gqlm.get_graphql_schema(
-                    include_query=True,
-                    include_mutation=True,
-                    include_subscription=True,
-                    include_types=True,
-                )
-
+            await update_branch_registry(db=db, branch=new_branch)
         else:
-
-            await registry.schema.load_schema(db=db, branch=new_branch)
-            registry.branch[new_branch.name] = new_branch
-            schema_branch = registry.schema.get_schema_branch(name=new_branch.name)
-            gqlm = GraphQLSchemaManager.get_manager_for_branch(branch=new_branch, schema_branch=schema_branch)
-            gqlm.get_graphql_schema(
-                include_query=True,
-                include_mutation=True,
-                include_subscription=True,
-                include_types=True,
-            )
+            await create_branch_registry(db=db, branch=new_branch)
 
     purged_branches = await registry.purge_inactive_branches(db=db, active_branches=branches)
     purged_branches.update(
infrahub_sdk/client.py
CHANGED

@@ -784,7 +784,6 @@ class InfrahubClient(BaseClient):
         if at:
             at = Timestamp(at)
 
-        node = InfrahubNode(client=self, schema=schema, branch=branch)
         filters = kwargs
         pagination_size = self.pagination_size
 
@@ -825,12 +824,12 @@ class InfrahubClient(BaseClient):
         nodes = []
         related_nodes = []
         batch_process = await self.create_batch()
-        count = await self.count(kind=schema.kind, partial_match=partial_match, **filters)
+        count = await self.count(kind=schema.kind, branch=branch, partial_match=partial_match, **filters)
         total_pages = (count + pagination_size - 1) // pagination_size
 
         for page_number in range(1, total_pages + 1):
             page_offset = (page_number - 1) * pagination_size
-            batch_process.add(task=process_page,
+            batch_process.add(task=process_page, page_offset=page_offset, page_number=page_number)
 
         async for _, response in batch_process.execute():
             nodes.extend(response[1]["nodes"])
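
Two things happen in this hunk. Passing `branch=branch` keeps the page count on the same branch as the page queries themselves; without it, `count()` fell back to the client's default branch and could disagree with the data being paged. The unchanged context line computes the page count with integer ceiling division, which a quick check confirms:

# (count + size - 1) // size is ceil(count / size) without floats:
pagination_size = 50
assert (101 + pagination_size - 1) // pagination_size == 3  # pages of 50, 50, 1
assert (100 + pagination_size - 1) // pagination_size == 2  # exact multiple: no empty page
assert (1 + pagination_size - 1) // pagination_size == 1
assert (0 + pagination_size - 1) // pagination_size == 0    # nothing to fetch
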
@@ -847,7 +846,7 @@ class InfrahubClient(BaseClient):
 
         while has_remaining_items:
             page_offset = (page_number - 1) * pagination_size
-            response, process_result = await process_page(page_offset, page_number)
+            response, process_result = await process_page(page_offset=page_offset, page_number=page_number)
 
             nodes.extend(process_result["nodes"])
             related_nodes.extend(process_result["related_nodes"])
@@ -1946,9 +1945,9 @@ class InfrahubClientSync(BaseClient):
         """
         branch = branch or self.default_branch
         schema = self.schema.get(kind=kind, branch=branch)
-        node = InfrahubNodeSync(client=self, schema=schema, branch=branch)
         if at:
             at = Timestamp(at)
+
         filters = kwargs
         pagination_size = self.pagination_size
 
@@ -1990,12 +1989,12 @@ class InfrahubClientSync(BaseClient):
         related_nodes = []
         batch_process = self.create_batch()
 
-        count = self.count(kind=schema.kind, partial_match=partial_match, **filters)
+        count = self.count(kind=schema.kind, branch=branch, partial_match=partial_match, **filters)
         total_pages = (count + pagination_size - 1) // pagination_size
 
         for page_number in range(1, total_pages + 1):
             page_offset = (page_number - 1) * pagination_size
-            batch_process.add(task=process_page,
+            batch_process.add(task=process_page, page_offset=page_offset, page_number=page_number)
 
         for _, response in batch_process.execute():
             nodes.extend(response[1]["nodes"])
@@ -2012,7 +2011,7 @@ class InfrahubClientSync(BaseClient):
 
         while has_remaining_items:
             page_offset = (page_number - 1) * pagination_size
-            response, process_result = process_page(page_offset, page_number)
+            response, process_result = process_page(page_offset=page_offset, page_number=page_number)
 
             nodes.extend(process_result["nodes"])
             related_nodes.extend(process_result["related_nodes"])
infrahub_sdk/ctl/utils.py
CHANGED

@@ -187,6 +187,9 @@ def load_yamlfile_from_disk_and_exit(
     has_error = False
     try:
         data_files = file_type.load_from_disk(paths=paths)
+        if not data_files:
+            console.print("[red]No valid files found to load.")
+            raise typer.Exit(1)
     except FileNotValidError as exc:
         console.print(f"[red]{exc.message}")
         raise typer.Exit(1) from exc
infrahub_sdk/node/node.py
CHANGED

@@ -8,7 +8,7 @@ from ..constants import InfrahubClientMode
 from ..exceptions import FeatureNotSupportedError, NodeNotFoundError, ResourceNotDefinedError, SchemaNotFoundError
 from ..graphql import Mutation, Query
 from ..schema import GenericSchemaAPI, RelationshipCardinality, RelationshipKind
-from ..utils import compare_lists, generate_short_id, get_flat_value
+from ..utils import compare_lists, generate_short_id
 from .attribute import Attribute
 from .constants import (
     ARTIFACT_DEFINITION_GENERATE_FEATURE_NOT_SUPPORTED_MESSAGE,
@@ -402,10 +402,10 @@ class InfrahubNodeBase:
         if order:
             data["@filters"]["order"] = order
 
-        if offset:
+        if offset is not None:
             data["@filters"]["offset"] = offset
 
-        if limit:
+        if limit is not None:
             data["@filters"]["limit"] = limit
 
         if include and exclude:
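
This fixes a classic falsy-zero bug: `offset=0` is a legitimate filter value (start at the first record), but `if offset:` treats `0` the same as `None` and silently drops it. A self-contained illustration:

def build_filters(offset=None, limit=None) -> dict:
    filters = {}
    if offset is not None:  # the old test `if offset:` dropped an explicit offset of 0
        filters["offset"] = offset
    if limit is not None:
        filters["limit"] = limit
    return filters

print(build_filters(offset=0, limit=10))  # {'offset': 0, 'limit': 10}
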
@@ -418,14 +418,6 @@ class InfrahubNodeBase:
 
         return data
 
-    def extract(self, params: dict[str, str]) -> dict[str, Any]:
-        """Extract some datapoints defined in a flat notation."""
-        result: dict[str, Any] = {}
-        for key, value in params.items():
-            result[key] = get_flat_value(self, key=value)
-
-        return result
-
     def __hash__(self) -> int:
         return hash(self.id)
 
@@ -1036,6 +1028,46 @@ class InfrahubNode(InfrahubNodeBase):
 
         raise ResourceNotDefinedError(message=f"The node doesn't have a cardinality=one relationship for {name}")
 
+    async def get_flat_value(self, key: str, separator: str = "__") -> Any:
+        """Query recursively a value defined in a flat notation (string), on a hierarchy of objects
+
+        Examples:
+            name__value
+            module.object.value
+        """
+        if separator not in key:
+            return getattr(self, key)
+
+        first, remaining = key.split(separator, maxsplit=1)
+
+        if first in self._schema.attribute_names:
+            attr = getattr(self, first)
+            for part in remaining.split(separator):
+                attr = getattr(attr, part)
+            return attr
+
+        try:
+            rel = self._schema.get_relationship(name=first)
+        except ValueError as exc:
+            raise ValueError(f"No attribute or relationship named '{first}' for '{self._schema.kind}'") from exc
+
+        if rel.cardinality != RelationshipCardinality.ONE:
+            raise ValueError(
+                f"Can only look up flat value for relationships of cardinality {RelationshipCardinality.ONE.value}"
+            )
+
+        related_node: RelatedNode = getattr(self, first)
+        await related_node.fetch()
+        return await related_node.peer.get_flat_value(key=remaining, separator=separator)
+
+    async def extract(self, params: dict[str, str]) -> dict[str, Any]:
+        """Extract some datapoints defined in a flat notation."""
+        result: dict[str, Any] = {}
+        for key, value in params.items():
+            result[key] = await self.get_flat_value(key=value)
+
+        return result
+
     def __dir__(self) -> Iterable[str]:
         base = list(super().__dir__())
         return sorted(
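
A usage sketch for the new methods, assuming a hypothetical node with a `name` attribute and a cardinality-one `site` relationship; the field names are illustrative, only the `__` separator and the method signatures come from the diff:

async def summarize(device) -> dict:
    # "name__value" resolves locally: attribute "name", then its "value".
    hostname = await device.get_flat_value(key="name__value")
    # "site__name__value" crosses the cardinality-one "site" relationship;
    # the peer is fetched on demand and the rest of the path resolves on it.
    site_name = await device.get_flat_value(key="site__name__value")
    # extract() is the dict-of-paths convenience wrapper over get_flat_value():
    assert await device.extract(params={"hostname": "name__value"}) == {"hostname": hostname}
    return {"hostname": hostname, "site": site_name}
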
@@ -1493,15 +1525,15 @@ class InfrahubNodeSync(InfrahubNodeBase):
         for rel_name in self._relationships:
             rel = getattr(self, rel_name)
             if rel and isinstance(rel, RelatedNodeSync):
-                relation = node_data["node"].get(rel_name)
-                if relation.get("node", None):
+                relation = node_data["node"].get(rel_name, None)
+                if relation and relation.get("node", None):
                     related_node = InfrahubNodeSync.from_graphql(
                         client=self._client, branch=branch, data=relation, timeout=timeout
                     )
                     related_nodes.append(related_node)
             elif rel and isinstance(rel, RelationshipManagerSync):
-                peers = node_data["node"].get(rel_name)
-                if peers:
+                peers = node_data["node"].get(rel_name, None)
+                if peers and peers["edges"]:
                     for peer in peers["edges"]:
                         related_node = InfrahubNodeSync.from_graphql(
                             client=self._client, branch=branch, data=peer, timeout=timeout
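
The new guards fix an `AttributeError` when a GraphQL payload omits a relationship or returns it as null: `.get(rel_name)` then yields `None`, and the old code called `.get("node")` on it unconditionally. A small illustration with a hypothetical payload (the field name is made up):

# Payload where a cardinality-one relationship came back null:
node_data = {"node": {"id": "d3f8", "primary_tag": None}}

relation = node_data["node"].get("primary_tag", None)
if relation and relation.get("node", None):  # old code raised AttributeError here
    print("peer present")
else:
    print("no peer, safely skipped")
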
@@ -1622,6 +1654,46 @@ class InfrahubNodeSync(InfrahubNodeBase):
 
         raise ResourceNotDefinedError(message=f"The node doesn't have a cardinality=one relationship for {name}")
 
+    def get_flat_value(self, key: str, separator: str = "__") -> Any:
+        """Query recursively a value defined in a flat notation (string), on a hierarchy of objects
+
+        Examples:
+            name__value
+            module.object.value
+        """
+        if separator not in key:
+            return getattr(self, key)
+
+        first, remaining = key.split(separator, maxsplit=1)
+
+        if first in self._schema.attribute_names:
+            attr = getattr(self, first)
+            for part in remaining.split(separator):
+                attr = getattr(attr, part)
+            return attr
+
+        try:
+            rel = self._schema.get_relationship(name=first)
+        except ValueError as exc:
+            raise ValueError(f"No attribute or relationship named '{first}' for '{self._schema.kind}'") from exc
+
+        if rel.cardinality != RelationshipCardinality.ONE:
+            raise ValueError(
+                f"Can only look up flat value for relationships of cardinality {RelationshipCardinality.ONE.value}"
+            )
+
+        related_node: RelatedNodeSync = getattr(self, first)
+        related_node.fetch()
+        return related_node.peer.get_flat_value(key=remaining, separator=separator)
+
+    def extract(self, params: dict[str, str]) -> dict[str, Any]:
+        """Extract some datapoints defined in a flat notation."""
+        result: dict[str, Any] = {}
+        for key, value in params.items():
+            result[key] = self.get_flat_value(key=value)
+
+        return result
+
     def __dir__(self) -> Iterable[str]:
         base = list(super().__dir__())
         return sorted(
infrahub_sdk/node/relationship.py
CHANGED

@@ -1,11 +1,15 @@
 from __future__ import annotations
 
+from collections import defaultdict
 from collections.abc import Iterable
 from typing import TYPE_CHECKING, Any
 
+from ..batch import InfrahubBatch
 from ..exceptions import (
+    Error,
     UninitializedError,
 )
+from ..types import Order
 from .constants import PROPERTIES_FLAG, PROPERTIES_OBJECT
 from .related_node import RelatedNode, RelatedNodeSync
 
@@ -156,8 +160,26 @@ class RelationshipManager(RelationshipManagerBase):
         self.peers = rm.peers
         self.initialized = True
 
+        ids_per_kind_map = defaultdict(list)
         for peer in self.peers:
-            await peer.fetch()
+            if not peer.id or not peer.typename:
+                raise Error("Unable to fetch the peer, id and/or typename are not defined")
+            ids_per_kind_map[peer.typename].append(peer.id)
+
+        batch = InfrahubBatch(max_concurrent_execution=self.client.max_concurrent_execution)
+        for kind, ids in ids_per_kind_map.items():
+            batch.add(
+                task=self.client.filters,
+                kind=kind,
+                ids=ids,
+                populate_store=True,
+                branch=self.branch,
+                parallel=True,
+                order=Order(disable=True),
+            )
+
+        async for _ in batch.execute():
+            pass
 
     def add(self, data: str | RelatedNode | dict) -> None:
         """Add a new peer to this relationship."""
@@ -261,8 +283,27 @@ class RelationshipManagerSync(RelationshipManagerBase):
         self.peers = rm.peers
         self.initialized = True
 
+        ids_per_kind_map = defaultdict(list)
         for peer in self.peers:
-            peer.fetch()
+            if not peer.id or not peer.typename:
+                raise Error("Unable to fetch the peer, id and/or typename are not defined")
+            ids_per_kind_map[peer.typename].append(peer.id)
+
+        # Unlike Async, no need to create a new batch from scratch because we are not using a semaphore
+        batch = self.client.create_batch()
+        for kind, ids in ids_per_kind_map.items():
+            batch.add(
+                task=self.client.filters,
+                kind=kind,
+                ids=ids,
+                populate_store=True,
+                branch=self.branch,
+                parallel=True,
+                order=Order(disable=True),
+            )
+
+        for _ in batch.execute():
+            pass
 
     def add(self, data: str | RelatedNodeSync | dict) -> None:
         """Add a new peer to this relationship."""
infrahub_sdk/protocols_base.py
CHANGED
infrahub_sdk/utils.py
CHANGED

@@ -190,23 +190,6 @@ def str_to_bool(value: str) -> bool:
         raise ValueError(f"{value} can not be converted into a boolean") from exc
 
 
-def get_flat_value(obj: Any, key: str, separator: str = "__") -> Any:
-    """Query recursively an value defined in a flat notation (string), on a hierarchy of objects
-
-    Examples:
-        name__value
-        module.object.value
-    """
-    if separator not in key:
-        return getattr(obj, key)
-
-    first_part, remaining_part = key.split(separator, maxsplit=1)
-    sub_obj = getattr(obj, first_part)
-    if not sub_obj:
-        return None
-    return get_flat_value(obj=sub_obj, key=remaining_part, separator=separator)
-
-
 def generate_request_filename(request: httpx.Request) -> str:
     """Return a filename for a request sent to the Infrahub API
 
infrahub_sdk/yaml.py
CHANGED

@@ -120,16 +120,22 @@ class YamlFile(LocalFile):
     @classmethod
     def load_from_disk(cls, paths: list[Path]) -> list[Self]:
         yaml_files: list[Self] = []
+        file_extensions = {".yaml", ".yml", ".json"}  # FIXME: .json is not a YAML file, should be removed
+
         for file_path in paths:
-            if file_path.
-
+            if not file_path.exists():
+                # Check if the provided path exists, relevant for the first call coming from the user
+                raise FileNotValidError(name=str(file_path), message=f"{file_path} does not exist!")
+            if file_path.is_file():
+                if file_path.suffix in file_extensions:
+                    yaml_files.extend(cls.load_file_from_disk(path=file_path))
+            # else: silently skip files with unrelevant extensions (e.g. .md, .py...)
             elif file_path.is_dir():
+                # Introduce recursion to handle sub-folders
                 sub_paths = [Path(sub_file_path) for sub_file_path in file_path.glob("*")]
-
-
-
-            else:
-                raise FileNotValidError(name=str(file_path), message=f"{file_path} does not exist!")
+                sub_paths = sorted(sub_paths, key=lambda p: p.name)
+                yaml_files.extend(cls.load_from_disk(paths=sub_paths))
+            # else: skip non-file, non-dir (e.g., symlink...)
 
         return yaml_files
 
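
The rewrite makes `load_from_disk` recurse into sub-folders (sorted by name for deterministic ordering), skip files with unrelated extensions instead of failing, and raise only when a user-supplied path does not exist. A usage sketch, assuming a hypothetical directory layout:

from pathlib import Path

from infrahub_sdk.yaml import YamlFile

# manifests/ might contain base.yml plus nested folders of YAML files;
# .md or .py files alongside them are now silently ignored rather than fatal.
files = YamlFile.load_from_disk(paths=[Path("manifests")])
print(f"loaded {len(files)} YAML documents")
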