infrahub-server 1.2.4__py3-none-any.whl → 1.2.5__py3-none-any.whl

This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Files changed (36)
  1. infrahub/cli/db.py +308 -2
  2. infrahub/core/branch/tasks.py +50 -10
  3. infrahub/core/graph/__init__.py +1 -1
  4. infrahub/core/migrations/graph/__init__.py +2 -0
  5. infrahub/core/migrations/graph/m026_0000_prefix_fix.py +54 -0
  6. infrahub/core/node/__init__.py +4 -1
  7. infrahub/core/node/resource_manager/number_pool.py +1 -1
  8. infrahub/core/schema/manager.py +0 -1
  9. infrahub/core/schema/schema_branch.py +5 -3
  10. infrahub/core/schema/schema_branch_computed.py +12 -1
  11. infrahub/events/branch_action.py +3 -0
  12. infrahub/events/group_action.py +1 -1
  13. infrahub/events/node_action.py +1 -1
  14. infrahub/git/integrator.py +2 -2
  15. infrahub/message_bus/messages/__init__.py +0 -2
  16. infrahub/message_bus/messages/request_proposedchange_pipeline.py +5 -0
  17. infrahub/message_bus/operations/__init__.py +0 -2
  18. infrahub/message_bus/operations/requests/proposed_change.py +29 -9
  19. infrahub/message_bus/types.py +2 -34
  20. infrahub/proposed_change/branch_diff.py +65 -0
  21. infrahub/proposed_change/tasks.py +12 -4
  22. infrahub/services/adapters/workflow/worker.py +1 -1
  23. infrahub/workflows/catalogue.py +10 -0
  24. infrahub_sdk/generator.py +1 -0
  25. infrahub_sdk/node.py +16 -4
  26. {infrahub_server-1.2.4.dist-info → infrahub_server-1.2.5.dist-info}/METADATA +2 -2
  27. {infrahub_server-1.2.4.dist-info → infrahub_server-1.2.5.dist-info}/RECORD +32 -34
  28. infrahub_testcontainers/models.py +2 -2
  29. infrahub_testcontainers/performance_test.py +4 -4
  30. infrahub/core/branch/flow_models.py +0 -0
  31. infrahub/message_bus/messages/event_branch_merge.py +0 -13
  32. infrahub/message_bus/operations/event/__init__.py +0 -3
  33. infrahub/message_bus/operations/event/branch.py +0 -61
  34. {infrahub_server-1.2.4.dist-info → infrahub_server-1.2.5.dist-info}/LICENSE.txt +0 -0
  35. {infrahub_server-1.2.4.dist-info → infrahub_server-1.2.5.dist-info}/WHEEL +0 -0
  36. {infrahub_server-1.2.4.dist-info → infrahub_server-1.2.5.dist-info}/entry_points.txt +0 -0
infrahub/cli/db.py CHANGED
@@ -3,10 +3,15 @@ from __future__ import annotations
  import importlib
  import logging
  import os
+ from collections import defaultdict
+ from csv import DictReader, DictWriter
+ from datetime import datetime, timezone
  from enum import Enum
- from typing import TYPE_CHECKING
+ from pathlib import Path
+ from typing import TYPE_CHECKING, Any

  import typer
+ import ujson
  from infrahub_sdk.async_typer import AsyncTyper
  from prefect.testing.utilities import prefect_test_harness
  from rich import print as rprint
@@ -19,7 +24,14 @@ from infrahub.core import registry
  from infrahub.core.graph import GRAPH_VERSION
  from infrahub.core.graph.constraints import ConstraintManagerBase, ConstraintManagerMemgraph, ConstraintManagerNeo4j
  from infrahub.core.graph.index import node_indexes, rel_indexes
- from infrahub.core.graph.schema import GRAPH_SCHEMA
+ from infrahub.core.graph.schema import (
+     GRAPH_SCHEMA,
+     GraphAttributeProperties,
+     GraphNodeProperties,
+     GraphRelationshipDefault,
+     GraphRelationshipIsPartOf,
+     GraphRelationshipProperties,
+ )
  from infrahub.core.initialization import (
      first_time_initialization,
      get_root_node,
@@ -419,3 +431,297 @@ async def update_core_schema(
      for message in migration_error_msgs:
          rprint(message)
      raise typer.Exit(1)
+
+
+ @app.command(name="selected-export")
+ async def selected_export_cmd(
+     ctx: typer.Context,
+     kinds: list[str] = typer.Option([], help="Node kinds to export"),  # noqa: B008
+     uuids: list[str] = typer.Option([], help="UUIDs of nodes to export"),  # noqa: B008
+     query_limit: int = typer.Option(1000, help="Maximum batch size of export query"),  # noqa: B008
+     export_dir: Path = typer.Option(Path("infrahub-exports"), help="Path of directory to save exports"),  # noqa: B008
+     config_file: str = typer.Argument("infrahub.toml", envvar="INFRAHUB_CONFIG"),
+ ) -> None:
+     """Export database structure of selected nodes from the database without any actual data"""
+     logging.getLogger("infrahub").setLevel(logging.WARNING)
+     logging.getLogger("neo4j").setLevel(logging.ERROR)
+     logging.getLogger("prefect").setLevel(logging.ERROR)
+
+     config.load_and_exit(config_file_name=config_file)
+
+     context: CliContext = ctx.obj
+     dbdriver = await context.init_db(retry=1)
+
+     await selected_export(db=dbdriver, kinds=kinds, uuids=uuids, export_dir=export_dir, query_limit=query_limit)
+
+     await dbdriver.close()
+
+
+ async def selected_export(
+     db: InfrahubDatabase,
+     kinds: list[str],
+     uuids: list[str],
+     export_dir: Path,
+     query_limit: int = 1000,
+ ) -> Path:
+     query = """
+     // --------------
+     // filter nodes
+     // --------------
+     MATCH (n:Node)
+     WHERE ($kinds IS NULL OR size($kinds) = 0 OR any(l IN labels(n) WHERE l in $kinds))
+     AND ($uuids IS NULL OR size($uuids) = 0 OR n.uuid IN $uuids)
+     WITH n
+     // --------------
+     // pagination
+     // --------------
+     ORDER BY %(id_func)s(n)
+     SKIP toInteger($offset)
+     LIMIT toInteger($limit)
+     CALL {
+         // --------------
+         // get all the nodes and edges linked to this node up to 2 steps away, excluding IS_PART_OF
+         // --------------
+         WITH n
+         MATCH (n)-[r1]-(v1)-[r2]-(v2)
+         WHERE type(r1) <> "IS_PART_OF"
+         WITH collect([v1, v2]) AS vertex_pairs, collect([r1, r2]) AS edge_pairs
+         WITH reduce(
+             vertices = [], v_pair IN vertex_pairs |
+             CASE
+                 WHEN NOT v_pair[0] IN vertices AND NOT v_pair[1] IN vertices THEN vertices + v_pair
+                 WHEN NOT v_pair[0] IN vertices THEN vertices + [v_pair[0]]
+                 WHEN NOT v_pair[1] IN vertices THEN vertices + [v_pair[1]]
+                 ELSE vertices
+             END
+         ) AS vertices,
+         reduce(
+             edges = [], e_pair IN edge_pairs |
+             CASE
+                 WHEN NOT e_pair[0] IN edges AND NOT e_pair[1] IN edges THEN edges + e_pair
+                 WHEN NOT e_pair[0] IN edges THEN edges + [e_pair[0]]
+                 WHEN NOT e_pair[1] IN edges THEN edges + [e_pair[1]]
+                 ELSE edges
+             END
+         ) AS edges
+         RETURN vertices, edges
+     }
+     // --------------
+     // include the root and IS_PART_OF edges
+     // --------------
+     OPTIONAL MATCH (n)-[root_edge:IS_PART_OF]->(root:Root)
+     WITH n, vertices, edges, root, collect(root_edge) AS root_edges
+     WITH n, edges + root_edges AS edges, CASE
+         WHEN root IS NOT NULL THEN vertices + [n, root]
+         ELSE vertices + [n]
+     END AS vertices
+     RETURN vertices, edges
+     """ % {"id_func": db.get_id_function_name()}
+     timestamp_str = datetime.now(tz=timezone.utc).strftime("%Y%m%d-%H%M%S")
+     export_dir /= Path(f"export-{timestamp_str}")
+     if not export_dir.exists():
+         export_dir.mkdir(parents=True)
+     vertex_path = export_dir / Path("vertices.csv")
+     vertex_path.touch(exist_ok=True)
+     edge_path = export_dir / Path("edges.csv")
+     edge_path.touch(exist_ok=True)
+
+     graph_node_schemas = [GraphNodeProperties, GraphRelationshipProperties, GraphAttributeProperties]
+     graph_vertex_properties = set()
+     for graph_schema in graph_node_schemas:
+         for field_name, field_info in graph_schema.model_fields.items():
+             property_name = field_info.alias or field_name
+             graph_vertex_properties.add(property_name)
+
+     graph_edge_schemas = [GraphRelationshipIsPartOf, GraphRelationshipDefault]
+     graph_edge_properties = set()
+     for graph_schema in graph_edge_schemas:
+         for field_name, field_info in graph_schema.model_fields.items():
+             property_name = field_info.alias or field_name
+             graph_edge_properties.add(property_name)
+
+     all_db_ids: set[str] = set()
+     has_more_data = True
+     limit = query_limit
+     offset = 0
+
+     with vertex_path.open(mode="w") as vertex_file, edge_path.open(mode="w") as edge_file:
+         vertex_field_names = ["db_id", "labels"] + sorted(graph_vertex_properties)
+         vertex_csv_writer = DictWriter(vertex_file, fieldnames=vertex_field_names)
+         vertex_csv_writer.writeheader()
+         edge_field_names = ["db_id", "edge_type", "start_node_id", "end_node_id"] + sorted(graph_edge_properties)
+         edge_csv_writer = DictWriter(edge_file, fieldnames=edge_field_names)
+         edge_csv_writer.writeheader()
+
+         while has_more_data:
+             rprint("Retrieving batch of vertices and edges...", end="")
+             results = await db.execute_query(
+                 query=query,
+                 params={"kinds": kinds, "uuids": uuids, "limit": limit, "offset": offset},
+             )
+             rprint("done. ", end="")
+             has_more_data = len(results) >= limit
+             offset += limit
+
+             rprint("Writing batch to export files...", end="")
+             for result in results:
+                 vertices = result.get("vertices")
+                 for vertex in vertices:
+                     if vertex.element_id in all_db_ids:
+                         continue
+                     serial_vertex = {
+                         "db_id": vertex.element_id,
+                         "labels": ujson.dumps(list(vertex.labels)),
+                     }
+                     for property_name in graph_vertex_properties:
+                         if value := vertex.get(property_name):
+                             serial_vertex[property_name] = value
+                     vertex_csv_writer.writerow(serial_vertex)
+                     all_db_ids.add(vertex.element_id)
+
+                 edges = result.get("edges")
+                 for edge in edges:
+                     if edge.element_id in all_db_ids:
+                         continue
+                     serial_edge = {
+                         "db_id": edge.element_id,
+                         "edge_type": edge.type,
+                         "start_node_id": edge.start_node.element_id,
+                         "end_node_id": edge.end_node.element_id,
+                     }
+                     for property_name in graph_edge_properties:
+                         if value := edge.get(property_name):
+                             serial_edge[property_name] = value
+                     edge_csv_writer.writerow(serial_edge)
+                     all_db_ids.add(edge.element_id)
+             rprint("done.")
+
+     rprint(f"{SUCCESS_BADGE} Export complete")
+     rprint(f"Export directory is here: {export_dir.absolute()}")
+     return export_dir
+
+
+ @app.command(name="load-export", hidden=True)
+ async def load_export_cmd(
+     ctx: typer.Context,
+     export_dir: Path = typer.Argument(help="Path to export directory"),
+     query_limit: int = typer.Option(1000, help="Maximum batch size of import query"),
+     config_file: str = typer.Argument("infrahub.toml", envvar="INFRAHUB_CONFIG"),
+ ) -> None:
+     """
+     Cannot be used for backup/restore functionality.
+     Loads an anonymized export into Neo4j.
+     Only used for analysis of output of the selected-export command.
+     """
+     logging.getLogger("infrahub").setLevel(logging.WARNING)
+     logging.getLogger("neo4j").setLevel(logging.ERROR)
+     logging.getLogger("prefect").setLevel(logging.ERROR)
+
+     config.load_and_exit(config_file_name=config_file)
+
+     context: CliContext = ctx.obj
+     dbdriver = await context.init_db(retry=1)
+
+     await load_export(db=dbdriver, export_dir=export_dir, query_limit=query_limit)
+
+     await dbdriver.close()
+
+
+ async def load_vertices(
+     db: InfrahubDatabase, vertex_labels: list[str], vertex_dicts: list[dict[str, str | int | bool | None]]
+ ) -> None:
+     vertex_import_query = """
+     UNWIND $vertices AS vertex
+     CREATE (v:ImportNode:%(node_labels)s {db_id: vertex.db_id})
+     SET v = vertex
+     """ % {"node_labels": ":".join(vertex_labels)}
+     rprint(f"Loading {len(vertex_dicts)} {vertex_labels} nodes...", end="")
+     await db.execute_query(query=vertex_import_query, params={"vertices": vertex_dicts})
+     rprint("done")
+
+
+ async def load_edges(
+     db: InfrahubDatabase, edge_type: str, edge_dicts: list[dict[str, str | int | bool | None]]
+ ) -> None:
+     edges_import_query = """
+     UNWIND $edges AS edge
+     MATCH (a:ImportNode) WHERE a.db_id = toString(edge.start_node_id)
+     MATCH (b:ImportNode) WHERE b.db_id = toString(edge.end_node_id)
+     CREATE (a)-[e:%(edge_type)s]->(b)
+     SET e = edge.properties
+     """ % {"edge_type": edge_type}
+     rprint(f"Loading {len(edge_dicts)} {edge_type} edges...", end="")
+     await db.execute_query(query=edges_import_query, params={"edges": edge_dicts})
+     rprint("done")
+
+
+ async def load_export(db: InfrahubDatabase, export_dir: Path, query_limit: int = 1000) -> None:
+     if not export_dir.exists():
+         rprint(f"{ERROR_BADGE} {export_dir} does not exist")
+         raise typer.Exit(1)
+     if not export_dir.is_dir():
+         rprint(f"{ERROR_BADGE} {export_dir} is not a directory")
+         raise typer.Exit(1)
+     vertex_file: Path | None = None
+     edge_file: Path | None = None
+
+     for export_file in export_dir.glob("*.csv"):
+         if export_file.name == "vertices.csv":
+             vertex_file = export_file
+         elif export_file.name == "edges.csv":
+             edge_file = export_file
+     if not vertex_file or not vertex_file.exists() or not vertex_file.is_file():
+         rprint(f"{ERROR_BADGE} File 'vertices.csv' does not exist in the export directory")
+         raise typer.Exit(1)
+     if not edge_file or not edge_file.exists() or not edge_file.is_file():
+         rprint(f"{ERROR_BADGE} File 'edges.csv' does not exist in the export directory")
+         raise typer.Exit(1)
+
+     # index massively improves time required to load a large export
+     create_index_query = "CREATE RANGE INDEX import_node_db_id IF NOT EXISTS FOR (v:ImportNode) ON (v.db_id)"
+     await db.execute_query(query=create_index_query)
+
+     rprint("Loading vertices...")
+     vertices_by_labels_map: dict[frozenset[str], list[dict[str, Any]]] = defaultdict(list)
+     with vertex_file.open() as file:
+         csv_reader = DictReader(file)
+         for vertex_row in csv_reader:
+             labels = frozenset(ujson.loads(vertex_row["labels"]))
+             cleaned_row = {k: v for k, v in vertex_row.items() if k != "labels" and v}
+             vertices_by_labels_map[labels].append(cleaned_row)
+             # once a group of vertices meets the query_limit, save them to the database and delete them from memory
+             if len(vertices_by_labels_map[labels]) >= query_limit:
+                 await load_vertices(db=db, vertex_labels=list(labels), vertex_dicts=vertices_by_labels_map[labels])
+                 vertices_by_labels_map[labels] = []
+
+     for labels, vertex_rows in vertices_by_labels_map.items():
+         await load_vertices(db=db, vertex_labels=list(labels), vertex_dicts=vertex_rows)
+     rprint("Vertices loaded")
+
+     rprint("Loading edges...")
+     edges_by_type_map: dict[str, list[dict[str, Any]]] = defaultdict(list)
+     with edge_file.open() as file:
+         csv_reader = DictReader(file)
+         for edge_row in csv_reader:
+             edge_type = edge_row["edge_type"]
+
+             edge_properties = {}
+             edge_dict = {}
+             for k, v in edge_row.items():
+                 if k == "edge_type" or not v:
+                     continue
+                 if k in ["start_node_id", "end_node_id"]:
+                     edge_dict[k] = v
+                 else:
+                     edge_properties[k] = v
+             edge_dict["properties"] = edge_properties
+             edges_by_type_map[edge_type].append(edge_dict)
+             # once a group of edges meets the query_limit, save them to the database and delete them from memory
+             if len(edges_by_type_map[edge_type]) >= query_limit:
+                 await load_edges(db=db, edge_type=edge_type, edge_dicts=edges_by_type_map[edge_type])
+                 edges_by_type_map[edge_type] = []
+
+     for edge_type, edge_dicts in edges_by_type_map.items():
+         await load_edges(db=db, edge_type=edge_type, edge_dicts=edge_dicts)
+     rprint("Edges loaded")
+     rprint(f"{SUCCESS_BADGE} Export loaded")
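
Note on the export layout produced by selected_export above: vertices.csv carries a db_id column plus a JSON-encoded labels column, and edges.csv carries db_id, edge_type, start_node_id, and end_node_id, with the start/end ids referring back to the db_id column of vertices.csv. A minimal sketch of reading an export back with the standard library and ujson; the directory name is a placeholder, since real exports are written under export-<UTC timestamp>:

from csv import DictReader
from pathlib import Path

import ujson  # same JSON library the export code uses for the labels column

export_dir = Path("infrahub-exports/export-20250101-000000")  # placeholder path

with (export_dir / "vertices.csv").open() as vertex_file:
    for row in DictReader(vertex_file):
        labels = ujson.loads(row["labels"])  # labels are stored as a JSON list
        print(row["db_id"], labels)

with (export_dir / "edges.csv").open() as edge_file:
    for row in DictReader(edge_file):
        # start/end ids reference the db_id column written to vertices.csv
        print(row["edge_type"], row["start_node_id"], "->", row["end_node_id"])
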
infrahub/core/branch/tasks.py CHANGED
@@ -18,6 +18,7 @@ from infrahub.core.diff.coordinator import DiffCoordinator
  from infrahub.core.diff.ipam_diff_parser import IpamDiffParser
  from infrahub.core.diff.merger.merger import DiffMerger
  from infrahub.core.diff.model.path import BranchTrackingId, EnrichedDiffRoot, EnrichedDiffRootMetadata
+ from infrahub.core.diff.models import RequestDiffUpdate
  from infrahub.core.diff.repository.repository import DiffRepository
  from infrahub.core.merge import BranchMerger
  from infrahub.core.migrations.schema.models import SchemaApplyMigrationData
@@ -32,15 +33,16 @@ from infrahub.events.models import EventMeta, InfrahubEvent
  from infrahub.events.node_action import get_node_event
  from infrahub.exceptions import BranchNotFoundError, MergeFailedError, ValidationError
  from infrahub.graphql.mutations.models import BranchCreateModel  # noqa: TC001
- from infrahub.log import get_log_data
- from infrahub.message_bus import Meta, messages
  from infrahub.services import InfrahubServices  # noqa: TC001 needed for prefect flow
- from infrahub.worker import WORKER_IDENTITY
  from infrahub.workflows.catalogue import (
      BRANCH_CANCEL_PROPOSED_CHANGES,
+     BRANCH_MERGE_POST_PROCESS,
      DIFF_REFRESH_ALL,
+     DIFF_UPDATE,
      GIT_REPOSITORIES_CREATE_BRANCH,
      IPAM_RECONCILIATION,
+     TRIGGER_ARTIFACT_DEFINITION_GENERATE,
+     TRIGGER_GENERATOR_DEFINITION_RUN,
  )
  from infrahub.workflows.utils import add_tags

@@ -271,15 +273,11 @@ async def merge_branch(
      # NOTE: we still need to convert this event and potentially pull
      # some tasks currently executed based on the event into this workflow
      # -------------------------------------------------------------
-     log_data = get_log_data()
-     request_id = log_data.get("request_id", "")
-     message = messages.EventBranchMerge(
-         source_branch=obj.name,
-         target_branch=registry.default_branch,
+     await service.workflow.submit_workflow(
+         workflow=BRANCH_MERGE_POST_PROCESS,
          context=context,
-         meta=Meta(initiator_id=WORKER_IDENTITY, request_id=request_id),
+         parameters={"source_branch": obj.name, "target_branch": registry.default_branch},
      )
-     await service.message_bus.send(message=message)

      events: list[InfrahubEvent] = [merge_event]

@@ -412,3 +410,45 @@ async def _get_diff_root(
      )

      return default_branch_diff
+
+
+ @flow(
+     name="branch-merge-post-process",
+     flow_run_name="Run additional tasks after merging {source_branch} in {target_branch}",
+ )
+ async def post_process_branch_merge(
+     source_branch: str, target_branch: str, context: InfrahubContext, service: InfrahubServices
+ ) -> None:
+     async with service.database.start_session() as db:
+         await add_tags(branches=[source_branch])
+         log = get_run_logger()
+         log.info(f"Running additional tasks after merging {source_branch} within {target_branch}")
+
+         component_registry = get_component_registry()
+         default_branch = registry.get_branch_from_registry()
+         diff_repository = await component_registry.get_component(DiffRepository, db=db, branch=default_branch)
+         # send diff update requests for every branch-tracking diff
+         branch_diff_roots = await diff_repository.get_roots_metadata(base_branch_names=[target_branch])
+
+         await service.workflow.submit_workflow(
+             workflow=TRIGGER_ARTIFACT_DEFINITION_GENERATE,
+             context=context,
+             parameters={"branch": target_branch},
+         )
+
+         await service.workflow.submit_workflow(
+             workflow=TRIGGER_GENERATOR_DEFINITION_RUN,
+             context=context,
+             parameters={"branch": target_branch},
+         )
+
+         for diff_root in branch_diff_roots:
+             if (
+                 diff_root.base_branch_name != diff_root.diff_branch_name
+                 and diff_root.tracking_id
+                 and isinstance(diff_root.tracking_id, BranchTrackingId)
+             ):
+                 request_diff_update_model = RequestDiffUpdate(branch_name=diff_root.diff_branch_name)
+                 await service.workflow.submit_workflow(
+                     workflow=DIFF_UPDATE, context=context, parameters={"model": request_diff_update_model}
+                 )
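
The post_process_branch_merge flow above refreshes only diffs that are branch-tracked and whose diff branch differs from the merge target. A small illustration of that selection logic using made-up stand-in classes (FakeTrackingId and FakeDiffRoot are hypothetical, not the real BranchTrackingId or EnrichedDiffRootMetadata):

from __future__ import annotations

from dataclasses import dataclass


@dataclass
class FakeTrackingId:  # hypothetical stand-in for BranchTrackingId
    name: str


@dataclass
class FakeDiffRoot:  # hypothetical stand-in for the diff root metadata
    base_branch_name: str
    diff_branch_name: str
    tracking_id: FakeTrackingId | None


roots = [
    FakeDiffRoot("main", "main", FakeTrackingId("branch")),       # skipped: diff branch equals the base
    FakeDiffRoot("main", "feature-1", FakeTrackingId("branch")),  # selected for a diff update
    FakeDiffRoot("main", "feature-2", None),                      # skipped: not branch-tracked
]

to_refresh = [
    root.diff_branch_name
    for root in roots
    if root.base_branch_name != root.diff_branch_name and isinstance(root.tracking_id, FakeTrackingId)
]
print(to_refresh)  # ['feature-1']
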
infrahub/core/graph/__init__.py CHANGED
@@ -1 +1 @@
- GRAPH_VERSION = 25
+ GRAPH_VERSION = 26
infrahub/core/migrations/graph/__init__.py CHANGED
@@ -27,6 +27,7 @@ from .m022_add_generate_template_attr import Migration022
  from .m023_deduplicate_cardinality_one_relationships import Migration023
  from .m024_missing_hierarchy_backfill import Migration024
  from .m025_uniqueness_nulls import Migration025
+ from .m026_0000_prefix_fix import Migration026

  if TYPE_CHECKING:
      from infrahub.core.root import Root
@@ -59,6 +60,7 @@ MIGRATIONS: list[type[GraphMigration | InternalSchemaMigration | ArbitraryMigrat
      Migration023,
      Migration024,
      Migration025,
+     Migration026,
  ]


infrahub/core/migrations/graph/m026_0000_prefix_fix.py ADDED
@@ -0,0 +1,54 @@
+ from __future__ import annotations
+
+ import ipaddress
+ from typing import TYPE_CHECKING, Sequence
+
+ from infrahub.core.branch.models import Branch
+ from infrahub.core.initialization import initialization
+ from infrahub.core.ipam.reconciler import IpamReconciler
+ from infrahub.core.manager import NodeManager
+ from infrahub.core.migrations.shared import MigrationResult
+ from infrahub.core.timestamp import Timestamp
+ from infrahub.lock import initialize_lock
+ from infrahub.log import get_logger
+
+ from ..shared import InternalSchemaMigration, SchemaMigration
+
+ if TYPE_CHECKING:
+     from infrahub.database import InfrahubDatabase
+
+ log = get_logger()
+
+
+ class Migration026(InternalSchemaMigration):
+     name: str = "026_prefix_0000_fix"
+     minimum_version: int = 25
+     migrations: Sequence[SchemaMigration] = []
+
+     async def validate_migration(self, db: InfrahubDatabase) -> MigrationResult:  # noqa: ARG002
+         return MigrationResult()
+
+     async def execute(self, db: InfrahubDatabase) -> MigrationResult:
+         # load schemas from database into registry
+         initialize_lock()
+         await initialization(db=db)
+
+         at = Timestamp()
+         for branch in await Branch.get_list(db=db):
+             prefix_0000s = await NodeManager.query(
+                 db=db, schema="BuiltinIPPrefix", branch=branch, filters={"prefix__values": ["0.0.0.0/0", "::/0"]}
+             )
+             if not prefix_0000s:
+                 continue
+             ipam_reconciler = IpamReconciler(db=db, branch=branch)
+             for prefix in prefix_0000s:
+                 ip_namespace = await prefix.ip_namespace.get_peer(db=db)
+                 ip_network = ipaddress.ip_network(prefix.prefix.value)
+                 await ipam_reconciler.reconcile(
+                     ip_value=ip_network,
+                     namespace=ip_namespace,
+                     node_uuid=prefix.get_id(),
+                     at=at,
+                 )
+
+         return MigrationResult()
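
For background on the values this migration targets: "0.0.0.0/0" and "::/0" are the IPv4 and IPv6 default prefixes, which the standard library parses as zero-length networks that contain every other prefix in the same address family. A stdlib-only illustration (the child prefix is an arbitrary example):

import ipaddress

v4_default = ipaddress.ip_network("0.0.0.0/0")
v6_default = ipaddress.ip_network("::/0")
print(v4_default.prefixlen, v6_default.prefixlen)  # 0 0

# Any concrete prefix of the same family sits underneath the default prefix.
child = ipaddress.ip_network("10.0.0.0/8")  # arbitrary example
print(child.subnet_of(v4_default))  # True
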
infrahub/core/node/__init__.py CHANGED
@@ -259,7 +259,10 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
          )
          return

-         if number_pool.node.value == self._schema.kind and number_pool.node_attribute.value == attribute.name:
+         if (
+             number_pool.node.value in [self._schema.kind] + self._schema.inherit_from
+             and number_pool.node_attribute.value == attribute.name
+         ):
              try:
                  next_free = await number_pool.get_resource(db=db, branch=self._branch, node=self)
              except PoolExhaustedError:
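
The change above widens the number-pool match from an exact comparison against the node's own kind to the kind plus the generics it inherits from. A plain-Python sketch of the membership test, with made-up kind names:

# Made-up kinds for illustration only.
node_kind = "InfraDevice"
inherit_from = ["CoreArtifactTarget", "GenericDevice"]
pool_target_kind = "GenericDevice"  # value of number_pool.node.value in this scenario

# Previous behaviour: only an exact match on the node's own kind counted.
print(pool_target_kind == node_kind)                   # False
# New behaviour: a pool defined against an inherited generic also matches.
print(pool_target_kind in [node_kind] + inherit_from)  # True
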
infrahub/core/node/resource_manager/number_pool.py CHANGED
@@ -50,7 +50,7 @@ class CoreNumberPool(Node):
              taken=taken,
          )
          if next_number is None:
-             raise PoolExhaustedError("There are no more addresses available in this pool.")
+             raise PoolExhaustedError("There are no more values available in this pool.")

          return next_number

infrahub/core/schema/manager.py CHANGED
@@ -155,7 +155,6 @@ class SchemaManager(NodeManager):

          updated_schema = None
          if update_db:
-             schema_diff = None
              if diff:
                  schema_diff = await self.update_schema_to_db(schema=schema, db=db, branch=branch, diff=diff)
              else:
infrahub/core/schema/schema_branch.py CHANGED
@@ -1005,9 +1005,11 @@ class SchemaBranch:
          generic_schema = self.get_generic(name=name, duplicate=False)
          for attribute in generic_schema.attributes:
              if attribute.computed_attribute and attribute.computed_attribute.kind != ComputedAttributeKind.USER:
-                 raise ValueError(
-                     f"{generic_schema.kind}: Attribute {attribute.name!r} computed attributes are only allowed on nodes not generics"
-                 )
+                 for inheriting_node in generic_schema.used_by:
+                     node_schema = self.get_node(name=inheriting_node, duplicate=False)
+                     self.computed_attributes.validate_generic_inheritance(
+                         node=node_schema, attribute=attribute, generic=generic_schema
+                     )

      def _validate_computed_attribute(self, node: NodeSchema, attribute: AttributeSchema) -> None:
          if not attribute.computed_attribute or attribute.computed_attribute.kind == ComputedAttributeKind.USER:
infrahub/core/schema/schema_branch_computed.py CHANGED
@@ -9,7 +9,7 @@ from pydantic import BaseModel, Field
  from infrahub.core.schema import AttributeSchema  # noqa: TC001

  if TYPE_CHECKING:
-     from infrahub.core.schema import NodeSchema, SchemaAttributePath
+     from infrahub.core.schema import GenericSchema, NodeSchema, SchemaAttributePath


  @dataclass
@@ -90,6 +90,7 @@ class ComputedAttributes:
      ) -> None:
          self._computed_python_transform_attribute_map: dict[str, list[AttributeSchema]] = transform_attribute_map or {}
          self._computed_jinja2_attribute_map: dict[str, RegisteredNodeComputedAttribute] = jinja2_attribute_map or {}
+         self._defined_from_generic: dict[str, str] = {}

      def duplicate(self) -> ComputedAttributes:
          return self.__class__(
@@ -166,6 +167,16 @@ class ComputedAttributes:
                  schema_path.active_relationship_schema.name
              ].append(deepcopy(source_attribute))

+     def validate_generic_inheritance(
+         self, node: NodeSchema, attribute: AttributeSchema, generic: GenericSchema
+     ) -> None:
+         attribute_key = f"{node.kind}__{attribute.name}"
+         if duplicate := self._defined_from_generic.get(attribute_key):
+             raise ValueError(
+                 f"{node.kind}: {attribute.name!r} is declared as a computed attribute from multiple generics {sorted([duplicate, generic.kind])}"
+             )
+         self._defined_from_generic[attribute_key] = generic.kind
+
      def get_impacted_jinja2_targets(self, kind: str, updates: list[str] | None = None) -> list[ComputedAttributeTarget]:
          if mapping := self._computed_jinja2_attribute_map.get(kind):
              return mapping.get_targets(updates=updates)
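
The validate_generic_inheritance helper added above keeps a "{kind}__{attribute}" registry so the same node attribute cannot be driven by computed-attribute definitions coming from two different generics. A minimal sketch of the same bookkeeping with plain strings; the kind names are hypothetical:

defined_from_generic: dict[str, str] = {}


def register(node_kind: str, attribute_name: str, generic_kind: str) -> None:
    key = f"{node_kind}__{attribute_name}"
    if (existing := defined_from_generic.get(key)) is not None:
        raise ValueError(
            f"{node_kind}: {attribute_name!r} is declared as a computed attribute "
            f"from multiple generics {sorted([existing, generic_kind])}"
        )
    defined_from_generic[key] = generic_kind


register("InfraDevice", "fqdn", "GenericDevice")  # first definition is accepted
register("InfraDevice", "fqdn", "GenericHost")    # raises ValueError
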
infrahub/events/branch_action.py CHANGED
@@ -100,6 +100,9 @@ class BranchMergedEvent(InfrahubEvent):

          return related

+     def get_messages(self) -> list[InfrahubMessage]:
+         return [RefreshRegistryBranches()]
+

  class BranchRebasedEvent(InfrahubEvent):
      """Event generated when a branch has been rebased"""
infrahub/events/group_action.py CHANGED
@@ -22,7 +22,7 @@ class GroupMutatedEvent(InfrahubEvent):
      def get_related(self) -> list[dict[str, str]]:
          related = super().get_related()

-         if self.kind == InfrahubKind.GRAPHQLQUERYGROUP:
+         if self.kind in [InfrahubKind.GENERATORGROUP, InfrahubKind.GRAPHQLQUERYGROUP]:
              # Temporary workaround to avoid too large payloads for the related field
              return related

infrahub/events/node_action.py CHANGED
@@ -24,7 +24,7 @@ class NodeMutatedEvent(InfrahubEvent):

      def get_related(self) -> list[dict[str, str]]:
          related = super().get_related()
-         if self.kind == InfrahubKind.GRAPHQLQUERYGROUP:
+         if self.kind in [InfrahubKind.GENERATORGROUP, InfrahubKind.GRAPHQLQUERYGROUP]:
              # Temporary workaround to avoid too large payloads for the related field
              return related

infrahub/git/integrator.py CHANGED
@@ -954,7 +954,7 @@ class InfrahubRepositoryIntegrator(InfrahubRepositoryBase):
              source=self.id,
              is_protected=True,
          )
-         obj = await self.sdk.create(kind=CoreCheckDefinition, branch=branch_name, **create_payload)
+         obj = await self.sdk.create(kind=CoreCheckDefinition, branch=branch_name, data=create_payload)
          await obj.save()

          return obj
@@ -1012,7 +1012,7 @@ class InfrahubRepositoryIntegrator(InfrahubRepositoryBase):
              source=str(self.id),
              is_protected=True,
          )
-         obj = await self.sdk.create(kind=CoreTransformPython, branch=branch_name, **create_payload)
+         obj = await self.sdk.create(kind=CoreTransformPython, branch=branch_name, data=create_payload)
          await obj.save()
          return obj

infrahub/message_bus/messages/__init__.py CHANGED
@@ -1,7 +1,6 @@
  from infrahub.message_bus import InfrahubMessage, InfrahubResponse

  from .check_generator_run import CheckGeneratorRun
- from .event_branch_merge import EventBranchMerge
  from .finalize_validator_execution import FinalizeValidatorExecution
  from .git_file_get import GitFileGet, GitFileGetResponse
  from .git_repository_connectivity import GitRepositoryConnectivity
@@ -15,7 +14,6 @@ from .send_echo_request import SendEchoRequest, SendEchoRequestResponse

  MESSAGE_MAP: dict[str, type[InfrahubMessage]] = {
      "check.generator.run": CheckGeneratorRun,
-     "event.branch.merge": EventBranchMerge,
      "finalize.validator.execution": FinalizeValidatorExecution,
      "git.file.get": GitFileGet,
      "git.repository.connectivity": GitRepositoryConnectivity,
infrahub/message_bus/messages/request_proposedchange_pipeline.py CHANGED
@@ -1,3 +1,5 @@
+ import uuid
+
  from pydantic import Field

  from infrahub.context import InfrahubContext
@@ -16,3 +18,6 @@ class RequestProposedChangePipeline(InfrahubMessage):
          default=CheckType.ALL, description="Can be used to restrict the pipeline to a specific type of job"
      )
      context: InfrahubContext = Field(..., description="The context of the task")
+     pipeline_id: uuid.UUID = Field(
+         default_factory=uuid.uuid4, description="The unique ID of the execution of this pipeline"
+     )
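
The new pipeline_id field relies on pydantic's default_factory, so each pipeline message gets a fresh UUID unless one is passed in explicitly. A small self-contained sketch of the same pattern; PipelineMessage is a made-up stand-in, not the real RequestProposedChangePipeline class:

import uuid

from pydantic import BaseModel, Field


class PipelineMessage(BaseModel):  # made-up stand-in for illustration
    pipeline_id: uuid.UUID = Field(
        default_factory=uuid.uuid4, description="The unique ID of the execution of this pipeline"
    )


first = PipelineMessage()
second = PipelineMessage()
print(first.pipeline_id != second.pipeline_id)  # True: each instance gets its own UUID

# An explicit value can still be supplied, e.g. to correlate messages from the same pipeline run.
explicit = PipelineMessage(pipeline_id=uuid.UUID("12345678-1234-5678-1234-567812345678"))
print(explicit.pipeline_id)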