infrahub-server 1.3.0b6__py3-none-any.whl → 1.3.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- infrahub/cli/db.py +7 -5
- infrahub/cli/upgrade.py +6 -1
- infrahub/core/attribute.py +5 -0
- infrahub/core/diff/calculator.py +4 -1
- infrahub/core/diff/coordinator.py +8 -1
- infrahub/core/diff/query/field_specifiers.py +1 -1
- infrahub/core/diff/query/merge.py +2 -2
- infrahub/core/diff/query_parser.py +23 -32
- infrahub/core/graph/__init__.py +1 -1
- infrahub/core/migrations/graph/__init__.py +2 -0
- infrahub/core/migrations/graph/m012_convert_account_generic.py +1 -1
- infrahub/core/migrations/graph/m023_deduplicate_cardinality_one_relationships.py +2 -2
- infrahub/core/migrations/graph/m029_duplicates_cleanup.py +2 -2
- infrahub/core/migrations/graph/m031_check_number_attributes.py +102 -0
- infrahub/core/migrations/query/attribute_rename.py +1 -1
- infrahub/core/node/__init__.py +5 -1
- infrahub/core/node/constraints/grouped_uniqueness.py +88 -132
- infrahub/core/query/delete.py +3 -3
- infrahub/core/schema/attribute_parameters.py +12 -5
- infrahub/core/schema/basenode_schema.py +107 -1
- infrahub/core/schema/schema_branch.py +17 -5
- infrahub/core/validators/attribute/min_max.py +7 -2
- infrahub/core/validators/uniqueness/model.py +17 -0
- infrahub/core/validators/uniqueness/query.py +212 -1
- infrahub/graphql/app.py +5 -1
- infrahub/graphql/mutations/main.py +18 -2
- infrahub/services/adapters/message_bus/nats.py +5 -1
- infrahub/services/scheduler.py +5 -1
- infrahub_sdk/node/__init__.py +2 -0
- infrahub_sdk/node/node.py +33 -2
- infrahub_sdk/node/related_node.py +7 -0
- {infrahub_server-1.3.0b6.dist-info → infrahub_server-1.3.2.dist-info}/METADATA +1 -1
- {infrahub_server-1.3.0b6.dist-info → infrahub_server-1.3.2.dist-info}/RECORD +36 -35
- {infrahub_server-1.3.0b6.dist-info → infrahub_server-1.3.2.dist-info}/LICENSE.txt +0 -0
- {infrahub_server-1.3.0b6.dist-info → infrahub_server-1.3.2.dist-info}/WHEEL +0 -0
- {infrahub_server-1.3.0b6.dist-info → infrahub_server-1.3.2.dist-info}/entry_points.txt +0 -0
|
@@ -5,10 +5,12 @@ from typing import TYPE_CHECKING, Any
|
|
|
5
5
|
from infrahub.core.constants.relationship_label import RELATIONSHIP_TO_VALUE_LABEL
|
|
6
6
|
from infrahub.core.query import Query, QueryType
|
|
7
7
|
|
|
8
|
+
from .model import QueryAttributePathValued, QueryRelationshipPathValued
|
|
9
|
+
|
|
8
10
|
if TYPE_CHECKING:
|
|
9
11
|
from infrahub.database import InfrahubDatabase
|
|
10
12
|
|
|
11
|
-
from .model import NodeUniquenessQueryRequest
|
|
13
|
+
from .model import NodeUniquenessQueryRequest, NodeUniquenessQueryRequestValued
|
|
12
14
|
|
|
13
15
|
|
|
14
16
|
class NodeUniqueAttributeConstraintQuery(Query):
|
|
@@ -244,3 +246,212 @@ class NodeUniqueAttributeConstraintQuery(Query):
|
|
|
244
246
|
"attr_value",
|
|
245
247
|
"relationship_identifier",
|
|
246
248
|
]
|
|
249
|
+
|
|
250
|
+
|
|
251
|
+
class UniquenessValidationQuery(Query):
|
|
252
|
+
name = "uniqueness_constraint_validation"
|
|
253
|
+
type = QueryType.READ
|
|
254
|
+
|
|
255
|
+
def __init__(
|
|
256
|
+
self,
|
|
257
|
+
query_request: NodeUniquenessQueryRequestValued,
|
|
258
|
+
node_ids_to_exclude: list[str] | None = None,
|
|
259
|
+
**kwargs: Any,
|
|
260
|
+
) -> None:
|
|
261
|
+
self.query_request = query_request
|
|
262
|
+
self.node_ids_to_exclude = node_ids_to_exclude
|
|
263
|
+
super().__init__(**kwargs)
|
|
264
|
+
|
|
265
|
+
def _build_attr_subquery(
|
|
266
|
+
self, node_kind: str, attr_path: QueryAttributePathValued, index: int, branch_filter: str, is_first_query: bool
|
|
267
|
+
) -> tuple[str, dict[str, str | int | float | bool]]:
|
|
268
|
+
attr_name_var = f"attr_name_{index}"
|
|
269
|
+
attr_value_var = f"attr_value_{index}"
|
|
270
|
+
if is_first_query:
|
|
271
|
+
first_query_filter = "WHERE $node_ids_to_exclude IS NULL OR NOT node.uuid IN $node_ids_to_exclude"
|
|
272
|
+
else:
|
|
273
|
+
first_query_filter = ""
|
|
274
|
+
attribute_query = """
|
|
275
|
+
MATCH (node:%(node_kind)s)-[:HAS_ATTRIBUTE]->(attr:Attribute {name: $%(attr_name_var)s})-[:HAS_VALUE]->(:AttributeValue {value: $%(attr_value_var)s})
|
|
276
|
+
%(first_query_filter)s
|
|
277
|
+
WITH DISTINCT node
|
|
278
|
+
CALL (node) {
|
|
279
|
+
MATCH (node)-[r:HAS_ATTRIBUTE]->(attr:Attribute {name: $%(attr_name_var)s})
|
|
280
|
+
WHERE %(branch_filter)s
|
|
281
|
+
WITH attr, r.status = "active" AS is_active
|
|
282
|
+
ORDER BY r.branch_level DESC, r.from DESC, r.status ASC
|
|
283
|
+
WITH attr, is_active
|
|
284
|
+
LIMIT 1
|
|
285
|
+
WITH attr, is_active
|
|
286
|
+
WHERE is_active = TRUE
|
|
287
|
+
MATCH (attr)-[r:HAS_VALUE]->(:AttributeValue {value: $%(attr_value_var)s})
|
|
288
|
+
WHERE %(branch_filter)s
|
|
289
|
+
WITH r
|
|
290
|
+
ORDER BY r.branch_level DESC, r.from DESC, r.status ASC
|
|
291
|
+
LIMIT 1
|
|
292
|
+
WITH r
|
|
293
|
+
WHERE r.status = "active"
|
|
294
|
+
RETURN 1 AS is_match_%(index)s
|
|
295
|
+
}
|
|
296
|
+
""" % {
|
|
297
|
+
"first_query_filter": first_query_filter,
|
|
298
|
+
"node_kind": node_kind,
|
|
299
|
+
"attr_name_var": attr_name_var,
|
|
300
|
+
"attr_value_var": attr_value_var,
|
|
301
|
+
"branch_filter": branch_filter,
|
|
302
|
+
"index": index,
|
|
303
|
+
}
|
|
304
|
+
params: dict[str, str | int | float | bool] = {
|
|
305
|
+
attr_name_var: attr_path.attribute_name,
|
|
306
|
+
attr_value_var: attr_path.value,
|
|
307
|
+
}
|
|
308
|
+
return attribute_query, params
|
|
309
|
+
|
|
310
|
+
def _build_rel_subquery(
|
|
311
|
+
self,
|
|
312
|
+
node_kind: str,
|
|
313
|
+
rel_path: QueryRelationshipPathValued,
|
|
314
|
+
index: int,
|
|
315
|
+
branch_filter: str,
|
|
316
|
+
is_first_query: bool,
|
|
317
|
+
) -> tuple[str, dict[str, str | int | float | bool]]:
|
|
318
|
+
params: dict[str, str | int | float | bool] = {}
|
|
319
|
+
rel_attr_query = ""
|
|
320
|
+
rel_attr_match = ""
|
|
321
|
+
if rel_path.attribute_name and rel_path.attribute_value:
|
|
322
|
+
attr_name_var = f"attr_name_{index}"
|
|
323
|
+
attr_value_var = f"attr_value_{index}"
|
|
324
|
+
rel_attr_query = """
|
|
325
|
+
MATCH (peer)-[r:HAS_ATTRIBUTE]->(attr:Attribute {name: $%(attr_name_var)s})
|
|
326
|
+
WHERE %(branch_filter)s
|
|
327
|
+
WITH attr, r.status = "active" AS is_active
|
|
328
|
+
ORDER BY r.branch_level DESC, r.from DESC, r.status ASC
|
|
329
|
+
WITH attr, is_active
|
|
330
|
+
LIMIT 1
|
|
331
|
+
WITH attr, is_active
|
|
332
|
+
WHERE is_active = TRUE
|
|
333
|
+
MATCH (attr)-[r:HAS_VALUE]->(:AttributeValue {value: $%(attr_value_var)s})
|
|
334
|
+
WHERE %(branch_filter)s
|
|
335
|
+
WITH r
|
|
336
|
+
ORDER BY r.branch_level DESC, r.from DESC, r.status ASC
|
|
337
|
+
LIMIT 1
|
|
338
|
+
WITH r
|
|
339
|
+
WHERE r.status = "active"
|
|
340
|
+
""" % {"attr_name_var": attr_name_var, "attr_value_var": attr_value_var, "branch_filter": branch_filter}
|
|
341
|
+
rel_attr_match = (
|
|
342
|
+
"-[r:HAS_ATTRIBUTE]->(attr:Attribute {name: $%(attr_name_var)s})-[:HAS_VALUE]->(:AttributeValue {value: $%(attr_value_var)s})"
|
|
343
|
+
% {
|
|
344
|
+
"attr_name_var": attr_name_var,
|
|
345
|
+
"attr_value_var": attr_value_var,
|
|
346
|
+
}
|
|
347
|
+
)
|
|
348
|
+
params[attr_name_var] = rel_path.attribute_name
|
|
349
|
+
params[attr_value_var] = rel_path.attribute_value
|
|
350
|
+
query_arrows = rel_path.relationship_schema.get_query_arrows()
|
|
351
|
+
rel_name_var = f"rel_name_{index}"
|
|
352
|
+
# long path MATCH is required to hit an index on the peer or AttributeValue of the peer
|
|
353
|
+
first_match = (
|
|
354
|
+
"MATCH (node:%(node_kind)s)%(lstart)s[:IS_RELATED]%(lend)s(:Relationship {name: $%(rel_name_var)s})%(rstart)s[:IS_RELATED]%(rend)s"
|
|
355
|
+
% {
|
|
356
|
+
"node_kind": node_kind,
|
|
357
|
+
"lstart": query_arrows.left.start,
|
|
358
|
+
"lend": query_arrows.left.end,
|
|
359
|
+
"rstart": query_arrows.right.start,
|
|
360
|
+
"rend": query_arrows.right.end,
|
|
361
|
+
"rel_name_var": rel_name_var,
|
|
362
|
+
}
|
|
363
|
+
)
|
|
364
|
+
peer_where = f"WHERE {branch_filter}"
|
|
365
|
+
if rel_path.peer_id:
|
|
366
|
+
peer_id_var = f"peer_id_{index}"
|
|
367
|
+
peer_where += f" AND peer.uuid = ${peer_id_var}"
|
|
368
|
+
params[peer_id_var] = rel_path.peer_id
|
|
369
|
+
first_match += "(:Node {uuid: $%(peer_id_var)s})" % {"peer_id_var": peer_id_var}
|
|
370
|
+
else:
|
|
371
|
+
peer_where += " AND peer.uuid <> node.uuid"
|
|
372
|
+
first_match += "(:Node)"
|
|
373
|
+
if rel_attr_match:
|
|
374
|
+
first_match += rel_attr_match
|
|
375
|
+
if is_first_query:
|
|
376
|
+
first_query_filter = "WHERE $node_ids_to_exclude IS NULL OR NOT node.uuid IN $node_ids_to_exclude"
|
|
377
|
+
else:
|
|
378
|
+
first_query_filter = ""
|
|
379
|
+
relationship_query = f"""
|
|
380
|
+
{first_match}
|
|
381
|
+
{first_query_filter}
|
|
382
|
+
WITH DISTINCT node
|
|
383
|
+
"""
|
|
384
|
+
relationship_query += """
|
|
385
|
+
CALL (node) {
|
|
386
|
+
MATCH (node)%(lstart)s[r:IS_RELATED]%(lend)s(rel:Relationship {name: $%(rel_name_var)s})
|
|
387
|
+
WHERE %(branch_filter)s
|
|
388
|
+
WITH rel, r.status = "active" AS is_active
|
|
389
|
+
ORDER BY r.branch_level DESC, r.from DESC, r.status ASC
|
|
390
|
+
WITH rel, is_active
|
|
391
|
+
LIMIT 1
|
|
392
|
+
WITH rel, is_active
|
|
393
|
+
WHERE is_active = TRUE
|
|
394
|
+
MATCH (rel)%(rstart)s[r:IS_RELATED]%(rend)s(peer:Node)
|
|
395
|
+
%(peer_where)s
|
|
396
|
+
WITH peer, r.status = "active" AS is_active
|
|
397
|
+
ORDER BY r.branch_level DESC, r.from DESC, r.status ASC
|
|
398
|
+
WITH peer, is_active
|
|
399
|
+
LIMIT 1
|
|
400
|
+
WITH peer, is_active
|
|
401
|
+
WHERE is_active = TRUE
|
|
402
|
+
%(rel_attr_query)s
|
|
403
|
+
RETURN 1 AS is_match_%(index)s
|
|
404
|
+
LIMIT 1
|
|
405
|
+
}
|
|
406
|
+
""" % {
|
|
407
|
+
"rel_name_var": rel_name_var,
|
|
408
|
+
"lstart": query_arrows.left.start,
|
|
409
|
+
"lend": query_arrows.left.end,
|
|
410
|
+
"rstart": query_arrows.right.start,
|
|
411
|
+
"rend": query_arrows.right.end,
|
|
412
|
+
"peer_where": peer_where,
|
|
413
|
+
"rel_attr_query": rel_attr_query,
|
|
414
|
+
"branch_filter": branch_filter,
|
|
415
|
+
"index": index,
|
|
416
|
+
}
|
|
417
|
+
params[rel_name_var] = rel_path.relationship_schema.get_identifier()
|
|
418
|
+
return relationship_query, params
|
|
419
|
+
|
|
420
|
+
async def query_init(self, db: InfrahubDatabase, **kwargs: Any) -> None: # noqa: ARG002
|
|
421
|
+
self.params["node_ids_to_exclude"] = self.node_ids_to_exclude
|
|
422
|
+
branch_filter, branch_params = self.branch.get_query_filter_path(at=self.at.to_string(), is_isolated=False)
|
|
423
|
+
self.params.update(branch_params)
|
|
424
|
+
|
|
425
|
+
subqueries = []
|
|
426
|
+
for index, schema_path in enumerate(self.query_request.unique_valued_paths):
|
|
427
|
+
is_first_query = index == 0
|
|
428
|
+
if isinstance(schema_path, QueryAttributePathValued):
|
|
429
|
+
subquery, params = self._build_attr_subquery(
|
|
430
|
+
node_kind=self.query_request.kind,
|
|
431
|
+
attr_path=schema_path,
|
|
432
|
+
index=index,
|
|
433
|
+
branch_filter=branch_filter,
|
|
434
|
+
is_first_query=is_first_query,
|
|
435
|
+
)
|
|
436
|
+
else:
|
|
437
|
+
subquery, params = self._build_rel_subquery(
|
|
438
|
+
node_kind=self.query_request.kind,
|
|
439
|
+
rel_path=schema_path,
|
|
440
|
+
index=index,
|
|
441
|
+
branch_filter=branch_filter,
|
|
442
|
+
is_first_query=is_first_query,
|
|
443
|
+
)
|
|
444
|
+
subqueries.append(subquery)
|
|
445
|
+
self.params.update(params)
|
|
446
|
+
|
|
447
|
+
full_query = "\n".join(subqueries)
|
|
448
|
+
self.add_to_query(full_query)
|
|
449
|
+
self.return_labels = ["node.uuid AS node_uuid", "node.kind AS node_kind"]
|
|
450
|
+
|
|
451
|
+
def get_violation_nodes(self) -> list[tuple[str, str]]:
|
|
452
|
+
violation_tuples = []
|
|
453
|
+
for result in self.results:
|
|
454
|
+
violation_tuples.append(
|
|
455
|
+
(result.get_as_type("node_uuid", return_type=str), result.get_as_type("node_kind", return_type=str))
|
|
456
|
+
)
|
|
457
|
+
return violation_tuples
|
infrahub/graphql/app.py
CHANGED
|
@@ -88,6 +88,8 @@ GQL_STOP = "stop"
|
|
|
88
88
|
ContextValue = Any | Callable[[HTTPConnection], Any]
|
|
89
89
|
RootValue = Any
|
|
90
90
|
|
|
91
|
+
subscription_tasks = set()
|
|
92
|
+
|
|
91
93
|
|
|
92
94
|
class InfrahubGraphQLApp:
|
|
93
95
|
def __init__(
|
|
@@ -446,7 +448,9 @@ class InfrahubGraphQLApp:
|
|
|
446
448
|
|
|
447
449
|
asyncgen = cast(AsyncGenerator[Any, None], result)
|
|
448
450
|
subscriptions[operation_id] = asyncgen
|
|
449
|
-
asyncio.create_task(self._observe_subscription(asyncgen, operation_id, websocket))
|
|
451
|
+
task = asyncio.create_task(self._observe_subscription(asyncgen, operation_id, websocket))
|
|
452
|
+
subscription_tasks.add(task)
|
|
453
|
+
task.add_done_callback(subscription_tasks.discard)
|
|
450
454
|
return []
|
|
451
455
|
|
|
452
456
|
async def _observe_subscription(
|
|
@@ -25,7 +25,7 @@ from infrahub.core.timestamp import Timestamp
|
|
|
25
25
|
from infrahub.database import retry_db_transaction
|
|
26
26
|
from infrahub.dependencies.registry import get_component_registry
|
|
27
27
|
from infrahub.events.generator import generate_node_mutation_events
|
|
28
|
-
from infrahub.exceptions import HFIDViolatedError, InitializationError
|
|
28
|
+
from infrahub.exceptions import HFIDViolatedError, InitializationError, NodeNotFoundError
|
|
29
29
|
from infrahub.graphql.context import apply_external_context
|
|
30
30
|
from infrahub.lock import InfrahubMultiLock, build_object_lock_name
|
|
31
31
|
from infrahub.log import get_log_data, get_logger
|
|
@@ -384,7 +384,23 @@ class InfrahubMutationMixin:
|
|
|
384
384
|
if len(exc.matching_nodes_ids) > 1:
|
|
385
385
|
raise RuntimeError(f"Multiple {schema_name} nodes have the same hfid") from exc
|
|
386
386
|
node_id = list(exc.matching_nodes_ids)[0]
|
|
387
|
-
|
|
387
|
+
|
|
388
|
+
try:
|
|
389
|
+
node = await NodeManager.get_one(
|
|
390
|
+
db=db, id=node_id, kind=schema_name, branch=branch, raise_on_error=True
|
|
391
|
+
)
|
|
392
|
+
except NodeNotFoundError as exc:
|
|
393
|
+
if branch.is_default:
|
|
394
|
+
raise
|
|
395
|
+
raise NodeNotFoundError(
|
|
396
|
+
node_type=exc.node_type,
|
|
397
|
+
identifier=exc.identifier,
|
|
398
|
+
branch_name=branch.name,
|
|
399
|
+
message=(
|
|
400
|
+
f"Node {exc.identifier} / {exc.node_type} uses this human-friendly ID, but does not exist on"
|
|
401
|
+
f" this branch. Please rebase this branch to access {exc.identifier} / {exc.node_type}"
|
|
402
|
+
),
|
|
403
|
+
) from exc
|
|
388
404
|
updated_obj, mutation = await cls._call_mutate_update(
|
|
389
405
|
info=info,
|
|
390
406
|
data=data,
|
|
@@ -26,6 +26,8 @@ if TYPE_CHECKING:
|
|
|
26
26
|
MessageFunction = Callable[[InfrahubMessage], Awaitable[None]]
|
|
27
27
|
ResponseClass = TypeVar("ResponseClass")
|
|
28
28
|
|
|
29
|
+
publish_tasks = set()
|
|
30
|
+
|
|
29
31
|
|
|
30
32
|
async def _add_request_id(message: InfrahubMessage) -> None:
|
|
31
33
|
log_data = get_log_data()
|
|
@@ -223,7 +225,9 @@ class NATSMessageBus(InfrahubMessageBus):
|
|
|
223
225
|
# Delayed retries are directly handled in the callback using Nack
|
|
224
226
|
return
|
|
225
227
|
# Use asyncio task for delayed publish since NATS does not support that out of the box
|
|
226
|
-
asyncio.create_task(self._publish_with_delay(message, routing_key, delay))
|
|
228
|
+
task = asyncio.create_task(self._publish_with_delay(message, routing_key, delay))
|
|
229
|
+
publish_tasks.add(task)
|
|
230
|
+
task.add_done_callback(publish_tasks.discard)
|
|
227
231
|
return
|
|
228
232
|
|
|
229
233
|
for enricher in self.message_enrichers:
|
infrahub/services/scheduler.py
CHANGED
|
@@ -16,6 +16,8 @@ if TYPE_CHECKING:
|
|
|
16
16
|
|
|
17
17
|
log = get_logger()
|
|
18
18
|
|
|
19
|
+
background_tasks = set()
|
|
20
|
+
|
|
19
21
|
|
|
20
22
|
@dataclass
|
|
21
23
|
class Schedule:
|
|
@@ -56,7 +58,9 @@ class InfrahubScheduler:
|
|
|
56
58
|
|
|
57
59
|
async def start_schedule(self) -> None:
|
|
58
60
|
for schedule in self.schedules:
|
|
59
|
-
asyncio.create_task(self.run_schedule(schedule=schedule), name=f"scheduled_task_{schedule.name}")
|
|
61
|
+
task = asyncio.create_task(self.run_schedule(schedule=schedule), name=f"scheduled_task_{schedule.name}")
|
|
62
|
+
background_tasks.add(task)
|
|
63
|
+
task.add_done_callback(background_tasks.discard)
|
|
60
64
|
|
|
61
65
|
async def shutdown(self) -> None:
|
|
62
66
|
self.running = False
|
infrahub_sdk/node/__init__.py
CHANGED
|
@@ -1,5 +1,6 @@
|
|
|
1
1
|
from __future__ import annotations
|
|
2
2
|
|
|
3
|
+
from .attribute import Attribute
|
|
3
4
|
from .constants import (
|
|
4
5
|
ARTIFACT_DEFINITION_GENERATE_FEATURE_NOT_SUPPORTED_MESSAGE,
|
|
5
6
|
ARTIFACT_FETCH_FEATURE_NOT_SUPPORTED_MESSAGE,
|
|
@@ -25,6 +26,7 @@ __all__ = [
|
|
|
25
26
|
"PROPERTIES_FLAG",
|
|
26
27
|
"PROPERTIES_OBJECT",
|
|
27
28
|
"SAFE_VALUE",
|
|
29
|
+
"Attribute",
|
|
28
30
|
"InfrahubNode",
|
|
29
31
|
"InfrahubNodeBase",
|
|
30
32
|
"InfrahubNodeSync",
|
infrahub_sdk/node/node.py
CHANGED
|
@@ -501,11 +501,27 @@ class InfrahubNode(InfrahubNodeBase):
|
|
|
501
501
|
|
|
502
502
|
return cls(client=client, schema=schema, branch=branch, data=cls._strip_alias(data))
|
|
503
503
|
|
|
504
|
-
def _init_relationships(self, data: dict | None = None) -> None:
|
|
504
|
+
def _init_relationships(self, data: dict | RelatedNode | None = None) -> None:
|
|
505
505
|
for rel_schema in self._schema.relationships:
|
|
506
506
|
rel_data = data.get(rel_schema.name, None) if isinstance(data, dict) else None
|
|
507
507
|
|
|
508
508
|
if rel_schema.cardinality == "one":
|
|
509
|
+
if isinstance(rel_data, RelatedNode):
|
|
510
|
+
peer_id_data: dict[str, Any] = {
|
|
511
|
+
key: value
|
|
512
|
+
for key, value in (
|
|
513
|
+
("id", rel_data.id),
|
|
514
|
+
("hfid", rel_data.hfid),
|
|
515
|
+
("__typename", rel_data.typename),
|
|
516
|
+
("kind", rel_data.kind),
|
|
517
|
+
("display_label", rel_data.display_label),
|
|
518
|
+
)
|
|
519
|
+
if value is not None
|
|
520
|
+
}
|
|
521
|
+
if peer_id_data:
|
|
522
|
+
rel_data = peer_id_data
|
|
523
|
+
else:
|
|
524
|
+
rel_data = None
|
|
509
525
|
self._relationship_cardinality_one_data[rel_schema.name] = RelatedNode(
|
|
510
526
|
name=rel_schema.name, branch=self._branch, client=self._client, schema=rel_schema, data=rel_data
|
|
511
527
|
)
|
|
@@ -1079,10 +1095,25 @@ class InfrahubNodeSync(InfrahubNodeBase):
|
|
|
1079
1095
|
rel_data = data.get(rel_schema.name, None) if isinstance(data, dict) else None
|
|
1080
1096
|
|
|
1081
1097
|
if rel_schema.cardinality == "one":
|
|
1098
|
+
if isinstance(rel_data, RelatedNodeSync):
|
|
1099
|
+
peer_id_data: dict[str, Any] = {
|
|
1100
|
+
key: value
|
|
1101
|
+
for key, value in (
|
|
1102
|
+
("id", rel_data.id),
|
|
1103
|
+
("hfid", rel_data.hfid),
|
|
1104
|
+
("__typename", rel_data.typename),
|
|
1105
|
+
("kind", rel_data.kind),
|
|
1106
|
+
("display_label", rel_data.display_label),
|
|
1107
|
+
)
|
|
1108
|
+
if value is not None
|
|
1109
|
+
}
|
|
1110
|
+
if peer_id_data:
|
|
1111
|
+
rel_data = peer_id_data
|
|
1112
|
+
else:
|
|
1113
|
+
rel_data = None
|
|
1082
1114
|
self._relationship_cardinality_one_data[rel_schema.name] = RelatedNodeSync(
|
|
1083
1115
|
name=rel_schema.name, branch=self._branch, client=self._client, schema=rel_schema, data=rel_data
|
|
1084
1116
|
)
|
|
1085
|
-
|
|
1086
1117
|
else:
|
|
1087
1118
|
self._relationship_cardinality_many_data[rel_schema.name] = RelationshipManagerSync(
|
|
1088
1119
|
name=rel_schema.name,
|
|
@@ -39,6 +39,7 @@ class RelatedNodeBase:
|
|
|
39
39
|
self._hfid: list[str] | None = None
|
|
40
40
|
self._display_label: str | None = None
|
|
41
41
|
self._typename: str | None = None
|
|
42
|
+
self._kind: str | None = None
|
|
42
43
|
|
|
43
44
|
if isinstance(data, (CoreNodeBase)):
|
|
44
45
|
self._peer = data
|
|
@@ -118,6 +119,12 @@ class RelatedNodeBase:
|
|
|
118
119
|
return self._peer.typename
|
|
119
120
|
return self._typename
|
|
120
121
|
|
|
122
|
+
@property
|
|
123
|
+
def kind(self) -> str | None:
|
|
124
|
+
if self._peer:
|
|
125
|
+
return self._peer.get_kind()
|
|
126
|
+
return self._kind
|
|
127
|
+
|
|
121
128
|
def _generate_input_data(self, allocate_from_pool: bool = False) -> dict[str, Any]:
|
|
122
129
|
data: dict[str, Any] = {}
|
|
123
130
|
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.3
|
|
2
2
|
Name: infrahub-server
|
|
3
|
-
Version: 1.3.0b6
|
|
3
|
+
Version: 1.3.2
|
|
4
4
|
Summary: Infrahub is taking a new approach to Infrastructure Management by providing a new generation of datastore to organize and control all the data that defines how an infrastructure should run.
|
|
5
5
|
License: AGPL-3.0-only
|
|
6
6
|
Author: OpsMill
|