infrahub-server 1.2.3-py3-none-any.whl → 1.2.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- infrahub/cli/db.py +308 -2
- infrahub/cli/git_agent.py +4 -10
- infrahub/config.py +32 -0
- infrahub/core/branch/tasks.py +50 -10
- infrahub/core/constants/__init__.py +1 -0
- infrahub/core/constraint/node/runner.py +6 -5
- infrahub/core/graph/__init__.py +1 -1
- infrahub/core/migrations/graph/__init__.py +4 -0
- infrahub/core/migrations/graph/m018_uniqueness_nulls.py +68 -70
- infrahub/core/migrations/graph/m025_uniqueness_nulls.py +26 -0
- infrahub/core/migrations/graph/m026_0000_prefix_fix.py +54 -0
- infrahub/core/migrations/schema/node_attribute_remove.py +16 -2
- infrahub/core/models.py +1 -1
- infrahub/core/node/__init__.py +4 -1
- infrahub/core/node/constraints/grouped_uniqueness.py +6 -1
- infrahub/core/node/resource_manager/number_pool.py +1 -1
- infrahub/core/registry.py +18 -0
- infrahub/core/schema/basenode_schema.py +21 -1
- infrahub/core/schema/definitions/internal.py +2 -1
- infrahub/core/schema/generated/base_node_schema.py +1 -1
- infrahub/core/schema/manager.py +21 -1
- infrahub/core/schema/schema_branch.py +8 -7
- infrahub/core/schema/schema_branch_computed.py +12 -1
- infrahub/database/__init__.py +10 -0
- infrahub/events/branch_action.py +3 -0
- infrahub/events/group_action.py +6 -1
- infrahub/events/node_action.py +5 -1
- infrahub/git/integrator.py +2 -2
- infrahub/graphql/mutations/main.py +10 -12
- infrahub/message_bus/messages/__init__.py +0 -4
- infrahub/message_bus/messages/request_proposedchange_pipeline.py +5 -0
- infrahub/message_bus/operations/__init__.py +0 -3
- infrahub/message_bus/operations/requests/proposed_change.py +29 -9
- infrahub/message_bus/types.py +2 -34
- infrahub/proposed_change/branch_diff.py +65 -0
- infrahub/proposed_change/tasks.py +12 -4
- infrahub/server.py +6 -11
- infrahub/services/adapters/cache/__init__.py +17 -0
- infrahub/services/adapters/cache/redis.py +11 -1
- infrahub/services/adapters/message_bus/__init__.py +20 -0
- infrahub/services/adapters/workflow/worker.py +1 -1
- infrahub/services/component.py +1 -2
- infrahub/tasks/registry.py +3 -7
- infrahub/workers/infrahub_async.py +4 -10
- infrahub/workflows/catalogue.py +10 -0
- infrahub_sdk/generator.py +1 -0
- infrahub_sdk/node.py +16 -4
- infrahub_sdk/schema/__init__.py +10 -1
- {infrahub_server-1.2.3.dist-info → infrahub_server-1.2.5.dist-info}/METADATA +2 -2
- {infrahub_server-1.2.3.dist-info → infrahub_server-1.2.5.dist-info}/RECORD +57 -60
- infrahub_testcontainers/container.py +4 -0
- infrahub_testcontainers/helpers.py +1 -1
- infrahub_testcontainers/models.py +2 -2
- infrahub_testcontainers/performance_test.py +4 -4
- infrahub/core/branch/flow_models.py +0 -0
- infrahub/message_bus/messages/event_branch_merge.py +0 -13
- infrahub/message_bus/messages/event_worker_newprimaryapi.py +0 -9
- infrahub/message_bus/operations/event/__init__.py +0 -3
- infrahub/message_bus/operations/event/branch.py +0 -61
- infrahub/message_bus/operations/event/worker.py +0 -9
- {infrahub_server-1.2.3.dist-info → infrahub_server-1.2.5.dist-info}/LICENSE.txt +0 -0
- {infrahub_server-1.2.3.dist-info → infrahub_server-1.2.5.dist-info}/WHEEL +0 -0
- {infrahub_server-1.2.3.dist-info → infrahub_server-1.2.5.dist-info}/entry_points.txt +0 -0
infrahub/cli/db.py
CHANGED

@@ -3,10 +3,15 @@ from __future__ import annotations
 import importlib
 import logging
 import os
+from collections import defaultdict
+from csv import DictReader, DictWriter
+from datetime import datetime, timezone
 from enum import Enum
-from
+from pathlib import Path
+from typing import TYPE_CHECKING, Any
 
 import typer
+import ujson
 from infrahub_sdk.async_typer import AsyncTyper
 from prefect.testing.utilities import prefect_test_harness
 from rich import print as rprint

@@ -19,7 +24,14 @@ from infrahub.core import registry
 from infrahub.core.graph import GRAPH_VERSION
 from infrahub.core.graph.constraints import ConstraintManagerBase, ConstraintManagerMemgraph, ConstraintManagerNeo4j
 from infrahub.core.graph.index import node_indexes, rel_indexes
-from infrahub.core.graph.schema import
+from infrahub.core.graph.schema import (
+    GRAPH_SCHEMA,
+    GraphAttributeProperties,
+    GraphNodeProperties,
+    GraphRelationshipDefault,
+    GraphRelationshipIsPartOf,
+    GraphRelationshipProperties,
+)
 from infrahub.core.initialization import (
     first_time_initialization,
     get_root_node,

@@ -419,3 +431,297 @@ async def update_core_schema(
         for message in migration_error_msgs:
             rprint(message)
         raise typer.Exit(1)
+
+
+@app.command(name="selected-export")
+async def selected_export_cmd(
+    ctx: typer.Context,
+    kinds: list[str] = typer.Option([], help="Node kinds to export"),  # noqa: B008
+    uuids: list[str] = typer.Option([], help="UUIDs of nodes to export"),  # noqa: B008
+    query_limit: int = typer.Option(1000, help="Maximum batch size of export query"),  # noqa: B008
+    export_dir: Path = typer.Option(Path("infrahub-exports"), help="Path of directory to save exports"),  # noqa: B008
+    config_file: str = typer.Argument("infrahub.toml", envvar="INFRAHUB_CONFIG"),
+) -> None:
+    """Export database structure of selected nodes from the database without any actual data"""
+    logging.getLogger("infrahub").setLevel(logging.WARNING)
+    logging.getLogger("neo4j").setLevel(logging.ERROR)
+    logging.getLogger("prefect").setLevel(logging.ERROR)
+
+    config.load_and_exit(config_file_name=config_file)
+
+    context: CliContext = ctx.obj
+    dbdriver = await context.init_db(retry=1)
+
+    await selected_export(db=dbdriver, kinds=kinds, uuids=uuids, export_dir=export_dir, query_limit=query_limit)
+
+    await dbdriver.close()
+
+
+async def selected_export(
+    db: InfrahubDatabase,
+    kinds: list[str],
+    uuids: list[str],
+    export_dir: Path,
+    query_limit: int = 1000,
+) -> Path:
+    query = """
+    // --------------
+    // filter nodes
+    // --------------
+    MATCH (n:Node)
+    WHERE ($kinds IS NULL OR size($kinds) = 0 OR any(l IN labels(n) WHERE l in $kinds))
+        AND ($uuids IS NULL OR size($uuids) = 0 OR n.uuid IN $uuids)
+    WITH n
+    // --------------
+    // pagination
+    // --------------
+    ORDER BY %(id_func)s(n)
+    SKIP toInteger($offset)
+    LIMIT toInteger($limit)
+    CALL {
+        // --------------
+        // get all the nodes and edges linked to this node up to 2 steps away, excluding IS_PART_OF
+        // --------------
+        WITH n
+        MATCH (n)-[r1]-(v1)-[r2]-(v2)
+        WHERE type(r1) <> "IS_PART_OF"
+        WITH collect([v1, v2]) AS vertex_pairs, collect([r1, r2]) AS edge_pairs
+        WITH reduce(
+            vertices = [], v_pair IN vertex_pairs |
+            CASE
+                WHEN NOT v_pair[0] IN vertices AND NOT v_pair[1] IN vertices THEN vertices + v_pair
+                WHEN NOT v_pair[0] IN vertices THEN vertices + [v_pair[0]]
+                WHEN NOT v_pair[1] IN vertices THEN vertices + [v_pair[1]]
+                ELSE vertices
+            END
+        ) AS vertices,
+        reduce(
+            edges = [], e_pair IN edge_pairs |
+            CASE
+                WHEN NOT e_pair[0] IN edges AND NOT e_pair[1] IN edges THEN edges + e_pair
+                WHEN NOT e_pair[0] IN edges THEN edges + [e_pair[0]]
+                WHEN NOT e_pair[1] IN edges THEN edges + [e_pair[1]]
+                ELSE edges
+            END
+        ) AS edges
+        RETURN vertices, edges
+    }
+    // --------------
+    // include the root and IS_PART_OF edges
+    // --------------
+    OPTIONAL MATCH (n)-[root_edge:IS_PART_OF]->(root:Root)
+    WITH n, vertices, edges, root, collect(root_edge) AS root_edges
+    WITH n, edges + root_edges AS edges, CASE
+        WHEN root IS NOT NULL THEN vertices + [n, root]
+        ELSE vertices + [n]
+    END AS vertices
+    RETURN vertices, edges
+    """ % {"id_func": db.get_id_function_name()}
+    timestamp_str = datetime.now(tz=timezone.utc).strftime("%Y%m%d-%H%M%S")
+    export_dir /= Path(f"export-{timestamp_str}")
+    if not export_dir.exists():
+        export_dir.mkdir(parents=True)
+    vertex_path = export_dir / Path("vertices.csv")
+    vertex_path.touch(exist_ok=True)
+    edge_path = export_dir / Path("edges.csv")
+    edge_path.touch(exist_ok=True)
+
+    graph_node_schemas = [GraphNodeProperties, GraphRelationshipProperties, GraphAttributeProperties]
+    graph_vertex_properties = set()
+    for graph_schema in graph_node_schemas:
+        for field_name, field_info in graph_schema.model_fields.items():
+            property_name = field_info.alias or field_name
+            graph_vertex_properties.add(property_name)
+
+    graph_edge_schemas = [GraphRelationshipIsPartOf, GraphRelationshipDefault]
+    graph_edge_properties = set()
+    for graph_schema in graph_edge_schemas:
+        for field_name, field_info in graph_schema.model_fields.items():
+            property_name = field_info.alias or field_name
+            graph_edge_properties.add(property_name)
+
+    all_db_ids: set[str] = set()
+    has_more_data = True
+    limit = query_limit
+    offset = 0
+
+    with vertex_path.open(mode="w") as vertex_file, edge_path.open(mode="w") as edge_file:
+        vertex_field_names = ["db_id", "labels"] + sorted(graph_vertex_properties)
+        vertex_csv_writer = DictWriter(vertex_file, fieldnames=vertex_field_names)
+        vertex_csv_writer.writeheader()
+        edge_field_names = ["db_id", "edge_type", "start_node_id", "end_node_id"] + sorted(graph_edge_properties)
+        edge_csv_writer = DictWriter(edge_file, fieldnames=edge_field_names)
+        edge_csv_writer.writeheader()
+
+        while has_more_data:
+            rprint("Retrieving batch of vertices and edges...", end="")
+            results = await db.execute_query(
+                query=query,
+                params={"kinds": kinds, "uuids": uuids, "limit": limit, "offset": offset},
+            )
+            rprint("done. ", end="")
+            has_more_data = len(results) >= limit
+            offset += limit
+
+            rprint("Writing batch to export files...", end="")
+            for result in results:
+                vertices = result.get("vertices")
+                for vertex in vertices:
+                    if vertex.element_id in all_db_ids:
+                        continue
+                    serial_vertex = {
+                        "db_id": vertex.element_id,
+                        "labels": ujson.dumps(list(vertex.labels)),
+                    }
+                    for property_name in graph_vertex_properties:
+                        if value := vertex.get(property_name):
+                            serial_vertex[property_name] = value
+                    vertex_csv_writer.writerow(serial_vertex)
+                    all_db_ids.add(vertex.element_id)
+
+                edges = result.get("edges")
+                for edge in edges:
+                    if edge.element_id in all_db_ids:
+                        continue
+                    serial_edge = {
+                        "db_id": edge.element_id,
+                        "edge_type": edge.type,
+                        "start_node_id": edge.start_node.element_id,
+                        "end_node_id": edge.end_node.element_id,
+                    }
+                    for property_name in graph_edge_properties:
+                        if value := edge.get(property_name):
+                            serial_edge[property_name] = value
+                    edge_csv_writer.writerow(serial_edge)
+                    all_db_ids.add(edge.element_id)
+            rprint("done.")
+
+    rprint(f"{SUCCESS_BADGE} Export complete")
+    rprint(f"Export directory is here: {export_dir.absolute()}")
+    return export_dir
+
+
+@app.command(name="load-export", hidden=True)
+async def load_export_cmd(
+    ctx: typer.Context,
+    export_dir: Path = typer.Argument(help="Path to export directory"),
+    query_limit: int = typer.Option(1000, help="Maximum batch size of import query"),
+    config_file: str = typer.Argument("infrahub.toml", envvar="INFRAHUB_CONFIG"),
+) -> None:
+    """
+    Cannot be used for backup/restore functionality.
+    Loads an anonymized export into Neo4j.
+    Only used for analysis of output of the selected-export command.
+    """
+    logging.getLogger("infrahub").setLevel(logging.WARNING)
+    logging.getLogger("neo4j").setLevel(logging.ERROR)
+    logging.getLogger("prefect").setLevel(logging.ERROR)
+
+    config.load_and_exit(config_file_name=config_file)
+
+    context: CliContext = ctx.obj
+    dbdriver = await context.init_db(retry=1)
+
+    await load_export(db=dbdriver, export_dir=export_dir, query_limit=query_limit)
+
+    await dbdriver.close()
+
+
+async def load_vertices(
+    db: InfrahubDatabase, vertex_labels: list[str], vertex_dicts: list[dict[str, str | int | bool | None]]
+) -> None:
+    vertex_import_query = """
+    UNWIND $vertices AS vertex
+    CREATE (v:ImportNode:%(node_labels)s {db_id: vertex.db_id})
+    SET v = vertex
+    """ % {"node_labels": ":".join(vertex_labels)}
+    rprint(f"Loading {len(vertex_dicts)} {vertex_labels} nodes...", end="")
+    await db.execute_query(query=vertex_import_query, params={"vertices": vertex_dicts})
+    rprint("done")
+
+
+async def load_edges(
+    db: InfrahubDatabase, edge_type: str, edge_dicts: list[dict[str, str | int | bool | None]]
+) -> None:
+    edges_import_query = """
+    UNWIND $edges AS edge
+    MATCH (a:ImportNode) WHERE a.db_id = toString(edge.start_node_id)
+    MATCH (b:ImportNode) WHERE b.db_id = toString(edge.end_node_id)
+    CREATE (a)-[e:%(edge_type)s]->(b)
+    SET e = edge.properties
+    """ % {"edge_type": edge_type}
+    rprint(f"Loading {len(edge_dicts)} {edge_type} edges...", end="")
+    await db.execute_query(query=edges_import_query, params={"edges": edge_dicts})
+    rprint("done")
+
+
+async def load_export(db: InfrahubDatabase, export_dir: Path, query_limit: int = 1000) -> None:
+    if not export_dir.exists():
+        rprint(f"{ERROR_BADGE} {export_dir} does not exist")
+        raise typer.Exit(1)
+    if not export_dir.is_dir():
+        rprint(f"{ERROR_BADGE} {export_dir} is not a directory")
+        raise typer.Exit(1)
+    vertex_file: Path | None = None
+    edge_file: Path | None = None
+
+    for export_file in export_dir.glob("*.csv"):
+        if export_file.name == "vertices.csv":
+            vertex_file = export_file
+        elif export_file.name == "edges.csv":
+            edge_file = export_file
+    if not vertex_file or not vertex_file.exists() or not vertex_file.is_file():
+        rprint(f"{ERROR_BADGE} File 'vertices.csv' does not exist in the export directory")
+        raise typer.Exit(1)
+    if not edge_file or not edge_file.exists() or not edge_file.is_file():
+        rprint(f"{ERROR_BADGE} File 'edges.csv' does not exist in the export directory")
+        raise typer.Exit(1)
+
+    # index massively improves time required to load a large export
+    create_index_query = "CREATE RANGE INDEX import_node_db_id IF NOT EXISTS FOR (v:ImportNode) ON (v.db_id)"
+    await db.execute_query(query=create_index_query)
+
+    rprint("Loading vertices...")
+    vertices_by_labels_map: dict[frozenset[str], list[dict[str, Any]]] = defaultdict(list)
+    with vertex_file.open() as file:
+        csv_reader = DictReader(file)
+        for vertex_row in csv_reader:
+            labels = frozenset(ujson.loads(vertex_row["labels"]))
+            cleaned_row = {k: v for k, v in vertex_row.items() if k != "labels" and v}
+            vertices_by_labels_map[labels].append(cleaned_row)
+            # once a group of vertices meets the query_limit, save them to the database and delete them from memory
+            if len(vertices_by_labels_map[labels]) >= query_limit:
+                await load_vertices(db=db, vertex_labels=list(labels), vertex_dicts=vertices_by_labels_map[labels])
+                vertices_by_labels_map[labels] = []
+
+    for labels, vertex_rows in vertices_by_labels_map.items():
+        await load_vertices(db=db, vertex_labels=list(labels), vertex_dicts=vertex_rows)
+    rprint("Vertices loaded")
+
+    rprint("Loading edges...")
+    edges_by_type_map: dict[str, list[dict[str, Any]]] = defaultdict(list)
+    with edge_file.open() as file:
+        csv_reader = DictReader(file)
+        for edge_row in csv_reader:
+            edge_type = edge_row["edge_type"]
+
+            edge_properties = {}
+            edge_dict = {}
+            for k, v in edge_row.items():
+                if k == "edge_type" or not v:
+                    continue
+                if k in ["start_node_id", "end_node_id"]:
+                    edge_dict[k] = v
+                else:
+                    edge_properties[k] = v
+            edge_dict["properties"] = edge_properties
+            edges_by_type_map[edge_type].append(edge_dict)
+            # once a group of edges meets the query_limit, save them to the database and delete them from memory
+            if len(edges_by_type_map[edge_type]) >= query_limit:
+                await load_edges(db=db, edge_type=edge_type, edge_dicts=edges_by_type_map[edge_type])
+                edges_by_type_map[edge_type] = []
+
+    for edge_type, edge_dicts in edges_by_type_map.items():
+        await load_edges(db=db, edge_type=edge_type, edge_dicts=edge_dicts)
+    rprint("Edges loaded")
+    rprint(f"{SUCCESS_BADGE} Export loaded")
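The new `selected-export` command writes a `vertices.csv` and an `edges.csv` into a timestamped sub-directory of the export directory, and the hidden `load-export` command reads those files back into Neo4j as `ImportNode` nodes for offline analysis (explicitly not a backup/restore path). The same helpers can be driven programmatically; below is a minimal sketch, assuming an already-initialised `InfrahubDatabase` (for example the `dbdriver` returned by `CliContext.init_db()`). The node kind used here is hypothetical and only for illustration.

```python
from pathlib import Path

from infrahub.cli.db import load_export, selected_export


async def export_then_reload(db) -> None:
    # Export only the graph structure (no attribute data) for the selected kinds/UUIDs.
    export_dir = await selected_export(
        db=db,
        kinds=["InfraDevice"],  # hypothetical kind, for illustration only
        uuids=[],               # an empty list disables the UUID filter
        export_dir=Path("infrahub-exports"),
        query_limit=500,
    )
    # Re-load the anonymized export for analysis; this is not a restore mechanism.
    await load_export(db=db, export_dir=export_dir, query_limit=500)
```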
infrahub/cli/git_agent.py
CHANGED

@@ -19,10 +19,8 @@ from infrahub.git import initialize_repositories_directory
 from infrahub.lock import initialize_lock
 from infrahub.log import get_logger
 from infrahub.services import InfrahubServices
-from infrahub.services.adapters.cache
-from infrahub.services.adapters.
-from infrahub.services.adapters.message_bus.nats import NATSMessageBus
-from infrahub.services.adapters.message_bus.rabbitmq import RabbitMQMessageBus
+from infrahub.services.adapters.cache import InfrahubCache
+from infrahub.services.adapters.message_bus import InfrahubMessageBus
 from infrahub.services.adapters.workflow.local import WorkflowLocalExecution
 from infrahub.services.adapters.workflow.worker import WorkflowWorkerExecution
 from infrahub.trace import configure_trace

@@ -121,13 +119,9 @@ async def start(
 
     component_type = ComponentType.GIT_AGENT
     message_bus = config.OVERRIDE.message_bus or (
-        await
-        if config.SETTINGS.broker.driver == config.BrokerDriver.NATS
-        else await RabbitMQMessageBus.new(component_type=component_type)
-    )
-    cache = config.OVERRIDE.cache or (
-        await NATSCache.new() if config.SETTINGS.cache.driver == config.CacheDriver.NATS else RedisCache()
+        await InfrahubMessageBus.new_from_driver(component_type=component_type, driver=config.SETTINGS.broker.driver)
     )
+    cache = config.OVERRIDE.cache or (await InfrahubCache.new_from_driver(driver=config.SETTINGS.cache.driver))
 
     service = await InfrahubServices.new(
         cache=cache,
infrahub/config.py
CHANGED

@@ -115,11 +115,43 @@ class BrokerDriver(str, Enum):
     RabbitMQ = "rabbitmq"
     NATS = "nats"
 
+    @property
+    def driver_module_path(self) -> str:
+        match self:
+            case BrokerDriver.NATS:
+                return "infrahub.services.adapters.message_bus.nats"
+            case BrokerDriver.RabbitMQ:
+                return "infrahub.services.adapters.message_bus.rabbitmq"
+
+    @property
+    def driver_class_name(self) -> str:
+        match self:
+            case BrokerDriver.NATS:
+                return "NATSMessageBus"
+            case BrokerDriver.RabbitMQ:
+                return "RabbitMQMessageBus"
+
 
 class CacheDriver(str, Enum):
     Redis = "redis"
     NATS = "nats"
 
+    @property
+    def driver_module_path(self) -> str:
+        match self:
+            case CacheDriver.NATS:
+                return "infrahub.services.adapters.cache.nats"
+            case CacheDriver.Redis:
+                return "infrahub.services.adapters.cache.redis"
+
+    @property
+    def driver_class_name(self) -> str:
+        match self:
+            case CacheDriver.NATS:
+                return "NATSCache"
+            case CacheDriver.Redis:
+                return "RedisCache"
+
 
 class WorkflowDriver(str, Enum):
     LOCAL = "local"
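These new properties give each driver enum enough information to locate its adapter class, which is what allows `git_agent.py` (above) to call `InfrahubMessageBus.new_from_driver(...)` and `InfrahubCache.new_from_driver(...)` instead of importing `RabbitMQMessageBus`, `NATSMessageBus`, `RedisCache`, or `NATSCache` directly. The actual factories live in `infrahub/services/adapters/message_bus/__init__.py` and `infrahub/services/adapters/cache/__init__.py`, whose bodies are not shown in this diff; a dynamic import along the following lines would be consistent with the new properties — a sketch, not the shipped implementation:

```python
import importlib

from infrahub import config


async def message_bus_from_driver(component_type, driver: config.BrokerDriver):
    # Resolve the adapter module and class from the enum properties added above.
    module = importlib.import_module(driver.driver_module_path)
    bus_class = getattr(module, driver.driver_class_name)
    # Both message bus adapters exposed an async `new()` constructor in 1.2.3
    # (see the removed lines in git_agent.py), so the factory presumably awaits it.
    return await bus_class.new(component_type=component_type)
```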
infrahub/core/branch/tasks.py
CHANGED

@@ -18,6 +18,7 @@ from infrahub.core.diff.coordinator import DiffCoordinator
 from infrahub.core.diff.ipam_diff_parser import IpamDiffParser
 from infrahub.core.diff.merger.merger import DiffMerger
 from infrahub.core.diff.model.path import BranchTrackingId, EnrichedDiffRoot, EnrichedDiffRootMetadata
+from infrahub.core.diff.models import RequestDiffUpdate
 from infrahub.core.diff.repository.repository import DiffRepository
 from infrahub.core.merge import BranchMerger
 from infrahub.core.migrations.schema.models import SchemaApplyMigrationData

@@ -32,15 +33,16 @@ from infrahub.events.models import EventMeta, InfrahubEvent
 from infrahub.events.node_action import get_node_event
 from infrahub.exceptions import BranchNotFoundError, MergeFailedError, ValidationError
 from infrahub.graphql.mutations.models import BranchCreateModel  # noqa: TC001
-from infrahub.log import get_log_data
-from infrahub.message_bus import Meta, messages
 from infrahub.services import InfrahubServices  # noqa: TC001 needed for prefect flow
-from infrahub.worker import WORKER_IDENTITY
 from infrahub.workflows.catalogue import (
     BRANCH_CANCEL_PROPOSED_CHANGES,
+    BRANCH_MERGE_POST_PROCESS,
     DIFF_REFRESH_ALL,
+    DIFF_UPDATE,
     GIT_REPOSITORIES_CREATE_BRANCH,
     IPAM_RECONCILIATION,
+    TRIGGER_ARTIFACT_DEFINITION_GENERATE,
+    TRIGGER_GENERATOR_DEFINITION_RUN,
 )
 from infrahub.workflows.utils import add_tags
 

@@ -271,15 +273,11 @@ async def merge_branch(
         # NOTE: we still need to convert this event and potentially pull
         # some tasks currently executed based on the event into this workflow
         # -------------------------------------------------------------
-
-
-        message = messages.EventBranchMerge(
-            source_branch=obj.name,
-            target_branch=registry.default_branch,
+        await service.workflow.submit_workflow(
+            workflow=BRANCH_MERGE_POST_PROCESS,
             context=context,
-
+            parameters={"source_branch": obj.name, "target_branch": registry.default_branch},
         )
-        await service.message_bus.send(message=message)
 
         events: list[InfrahubEvent] = [merge_event]
 

@@ -412,3 +410,45 @@ async def _get_diff_root(
     )
 
     return default_branch_diff
+
+
+@flow(
+    name="branch-merge-post-process",
+    flow_run_name="Run additional tasks after merging {source_branch} in {target_branch}",
+)
+async def post_process_branch_merge(
+    source_branch: str, target_branch: str, context: InfrahubContext, service: InfrahubServices
+) -> None:
+    async with service.database.start_session() as db:
+        await add_tags(branches=[source_branch])
+        log = get_run_logger()
+        log.info(f"Running additional tasks after merging {source_branch} within {target_branch}")
+
+        component_registry = get_component_registry()
+        default_branch = registry.get_branch_from_registry()
+        diff_repository = await component_registry.get_component(DiffRepository, db=db, branch=default_branch)
+        # send diff update requests for every branch-tracking diff
+        branch_diff_roots = await diff_repository.get_roots_metadata(base_branch_names=[target_branch])
+
+        await service.workflow.submit_workflow(
+            workflow=TRIGGER_ARTIFACT_DEFINITION_GENERATE,
+            context=context,
+            parameters={"branch": target_branch},
+        )
+
+        await service.workflow.submit_workflow(
+            workflow=TRIGGER_GENERATOR_DEFINITION_RUN,
+            context=context,
+            parameters={"branch": target_branch},
+        )
+
+        for diff_root in branch_diff_roots:
+            if (
+                diff_root.base_branch_name != diff_root.diff_branch_name
+                and diff_root.tracking_id
+                and isinstance(diff_root.tracking_id, BranchTrackingId)
+            ):
+                request_diff_update_model = RequestDiffUpdate(branch_name=diff_root.diff_branch_name)
+                await service.workflow.submit_workflow(
+                    workflow=DIFF_UPDATE, context=context, parameters={"model": request_diff_update_model}
+                )
infrahub/core/constants/__init__.py
CHANGED

@@ -348,6 +348,7 @@ RESTRICTED_NAMESPACES: list[str] = [
 NODE_NAME_REGEX = r"^[A-Z][a-zA-Z0-9]+$"
 DEFAULT_NAME_MIN_LENGTH = 2
 NAME_REGEX = r"^[a-z0-9\_]+$"
+NAME_REGEX_OR_EMPTY = r"^[a-z0-9\_]*$"
 DEFAULT_DESCRIPTION_LENGTH = 128
 
 DEFAULT_NAME_MAX_LENGTH = 32
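The only change here is the new `NAME_REGEX_OR_EMPTY` pattern, which differs from `NAME_REGEX` by using `*` instead of `+`, so an empty string is also accepted. A quick illustration of what the pattern matches:

```python
import re

NAME_REGEX_OR_EMPTY = r"^[a-z0-9\_]*$"

assert re.fullmatch(NAME_REGEX_OR_EMPTY, "")             # empty value now allowed
assert re.fullmatch(NAME_REGEX_OR_EMPTY, "my_name_01")   # same characters as NAME_REGEX
assert not re.fullmatch(NAME_REGEX_OR_EMPTY, "Bad-Name") # uppercase and dashes still rejected
```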
infrahub/core/constraint/node/runner.py
CHANGED

@@ -23,10 +23,15 @@ class NodeConstraintRunner:
         self.uniqueness_constraint = uniqueness_constraint
         self.relationship_manager_constraints = relationship_manager_constraints
 
-    async def check(
+    async def check(
+        self, node: Node, field_filters: list[str] | None = None, skip_uniqueness_check: bool = False
+    ) -> None:
         async with self.db.start_session() as db:
             await node.resolve_relationships(db=db)
 
+            if not skip_uniqueness_check:
+                await self.uniqueness_constraint.check(node, filters=field_filters)
+
             for relationship_name in node.get_schema().relationship_names:
                 if field_filters and relationship_name not in field_filters:
                     continue

@@ -34,7 +39,3 @@ class NodeConstraintRunner:
                 await relationship_manager.fetch_relationship_ids(db=db, force_refresh=True)
                 for relationship_constraint in self.relationship_manager_constraints:
                     await relationship_constraint.check(relm=relationship_manager, node_schema=node.get_schema())
-
-            # If HFID constraint is the only constraint violated, all other constraints need to have ran before,
-            # as it means there is an existing node that we might want to update in the case of an upsert
-            await self.uniqueness_constraint.check(node, filters=field_filters)
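With this change the uniqueness check runs before the relationship constraints instead of after them, and callers can now skip it entirely through the new `skip_uniqueness_check` flag. A minimal sketch of a call site, assuming a `NodeConstraintRunner` instance and a node are already in hand; the scenario is illustrative only:

```python
async def validate_name_only(constraint_runner, node) -> None:
    # Validate only the "name" field and skip the uniqueness check, e.g. when an
    # upsert has already matched an existing node and uniqueness was settled earlier.
    await constraint_runner.check(
        node=node,
        field_filters=["name"],
        skip_uniqueness_check=True,
    )
```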
infrahub/core/graph/__init__.py
CHANGED

@@ -1 +1 @@
-GRAPH_VERSION =
+GRAPH_VERSION = 26
infrahub/core/migrations/graph/__init__.py
CHANGED

@@ -26,6 +26,8 @@ from .m021_missing_hierarchy_merge import Migration021
 from .m022_add_generate_template_attr import Migration022
 from .m023_deduplicate_cardinality_one_relationships import Migration023
 from .m024_missing_hierarchy_backfill import Migration024
+from .m025_uniqueness_nulls import Migration025
+from .m026_0000_prefix_fix import Migration026
 
 if TYPE_CHECKING:
     from infrahub.core.root import Root

@@ -57,6 +59,8 @@ MIGRATIONS: list[type[GraphMigration | InternalSchemaMigration | ArbitraryMigrat
     Migration022,
     Migration023,
     Migration024,
+    Migration025,
+    Migration026,
 ]
 
 