infrahub-server 1.2.11__py3-none-any.whl → 1.2.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
infrahub/core/migrations/graph/m030_illegal_edges.py ADDED
@@ -0,0 +1,83 @@
+ from __future__ import annotations
+
+ from typing import TYPE_CHECKING, Any, Sequence
+
+ from infrahub.core.migrations.shared import GraphMigration, MigrationResult
+ from infrahub.log import get_logger
+
+ from ...query import Query, QueryType
+
+ if TYPE_CHECKING:
+     from infrahub.database import InfrahubDatabase
+
+ log = get_logger()
+
+
+ class DeletePosthumousEdges(Query):
+     name = "delete_posthumous_edges_query"
+     type = QueryType.WRITE
+     insert_return = False
+
+     async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:  # noqa: ARG002
+         query = """
+         // ------------
+         // find deleted nodes
+         // ------------
+         MATCH (n:Node)-[e:IS_PART_OF]->(:Root)
+         WHERE e.status = "deleted" OR e.to IS NOT NULL
+         WITH DISTINCT n, e.branch AS delete_branch, e.branch_level AS delete_branch_level, CASE
+             WHEN e.status = "deleted" THEN e.from
+             ELSE e.to
+         END AS delete_time
+         // ------------
+         // find the edges added to the deleted node after the delete time
+         // ------------
+         MATCH (n)-[added_e]-(peer)
+         WHERE added_e.from > delete_time
+         AND type(added_e) <> "IS_PART_OF"
+         // if the node was deleted on a branch (delete_branch_level > 1), and then updated on main/global (added_e.branch_level = 1), we can ignore it
+         AND added_e.branch_level >= delete_branch_level
+         AND (added_e.branch = delete_branch OR delete_branch_level = 1)
+         WITH DISTINCT n, delete_branch, delete_time, added_e, peer
+         // ------------
+         // get the branched_from for the branch on which the node was deleted
+         // ------------
+         CALL {
+             WITH added_e
+             MATCH (b:Branch {name: added_e.branch})
+             RETURN b.branched_from AS added_e_branched_from
+         }
+         // ------------
+         // account for the following situations, given that the edge update time is after the node delete time
+         // - deleted on main/global, updated on branch
+         //   - illegal if the delete is before branch.branched_from
+         // - deleted on branch, updated on branch
+         //   - illegal
+         // ------------
+         WITH n, delete_branch, delete_time, added_e, peer
+         WHERE delete_branch = added_e.branch
+         OR delete_time < added_e_branched_from
+         DELETE added_e
+         // --------------
+         // the peer _should_ only be an Attribute, but I want to make sure we don't
+         // inadvertently delete Root or an AttributeValue or a Boolean
+         // --------------
+         WITH peer
+         WHERE "Attribute" IN labels(peer)
+         DETACH DELETE peer
+         """
+         self.add_to_query(query)
+
+
+ class Migration030(GraphMigration):
+     """
+     Edges could have been added to Nodes after the Node was deleted, so we need to hard-delete those illegal edges
+     """
+
+     name: str = "030_delete_illegal_edges"
+     minimum_version: int = 29
+     queries: Sequence[type[Query]] = [DeletePosthumousEdges]
+
+     async def validate_migration(self, db: InfrahubDatabase) -> MigrationResult:  # noqa: ARG002
+         result = MigrationResult()
+         return result
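
Note: as a minimal sketch only, the new migration's validation step could be driven on its own as shown below. This is not part of the wheel; the module path is taken from the RECORD changes further down, `db` is assumed to be an existing InfrahubDatabase handle, and it is assumed Migration030 can be constructed with no arguments, as its class-level defaults suggest.

from infrahub.core.migrations.graph.m030_illegal_edges import Migration030

async def check_illegal_edges_migration(db):
    # validate_migration() currently just returns an empty MigrationResult (see above),
    # so this only demonstrates driving the migration object, not a real consistency check.
    migration = Migration030()
    return await migration.validate_migration(db=db)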
@@ -63,28 +63,32 @@ class AttributeAddQuery(Query):
  MATCH p = (n:%(node_kind)s)
  CALL {
  WITH n
- MATCH (root:Root)<-[r1:IS_PART_OF]-(n)
- OPTIONAL MATCH (n)-[r2:HAS_ATTRIBUTE]-(:Attribute { name: $attr_name })
- WHERE all(r in [r1, r2] WHERE (%(branch_filter)s))
- RETURN n as n1, r1 as r11, r2 as r12
- ORDER BY r2.branch_level DESC, r2.from ASC, r1.branch_level DESC, r1.from ASC
+ MATCH (:Root)<-[r:IS_PART_OF]-(n)
+ WHERE %(branch_filter)s
+ WITH n, r AS is_part_of_e
+ OPTIONAL MATCH (n)-[r:HAS_ATTRIBUTE]-(:Attribute { name: $attr_name })
+ WHERE %(branch_filter)s
+ WITH is_part_of_e, r AS has_attr_e
+ RETURN is_part_of_e, has_attr_e
+ ORDER BY has_attr_e.branch_level DESC, has_attr_e.from ASC, is_part_of_e.branch_level DESC, is_part_of_e.from ASC
  LIMIT 1
  }
- WITH n1 as n, r11 as r1, r12 as r2, av, is_protected_value, is_visible_value
- WHERE r1.status = "active" AND (r2 IS NULL OR r2.status = "deleted")
+ WITH n, is_part_of_e, has_attr_e, av, is_protected_value, is_visible_value
+ WHERE is_part_of_e.status = "active" AND (has_attr_e IS NULL OR has_attr_e.status = "deleted")
  CREATE (a:Attribute { name: $attr_name, branch_support: $branch_support })
  CREATE (n)-[:HAS_ATTRIBUTE $rel_props ]->(a)
  CREATE (a)-[:HAS_VALUE $rel_props ]->(av)
  CREATE (a)-[:IS_PROTECTED $rel_props]->(is_protected_value)
  CREATE (a)-[:IS_VISIBLE $rel_props]->(is_visible_value)
  %(uuid_generation)s
- FOREACH (i in CASE WHEN r2.status = "deleted" THEN [1] ELSE [] END |
- SET r2.to = $current_time
+ FOREACH (i in CASE WHEN has_attr_e.status = "deleted" THEN [1] ELSE [] END |
+ SET has_attr_e.to = $current_time
  )
  """ % {
  "branch_filter": branch_filter,
  "node_kind": self.node_kind,
  "uuid_generation": db.render_uuid_generation(node_label="a", node_attr="uuid"),
  }
+
  self.add_to_query(query)
  self.return_labels = ["n.uuid", "a.uuid"]
@@ -1957,7 +1957,11 @@ class SchemaBranch:
  )

  parent_hfid = f"{relationship.name}__template_name__value"
- if relationship.kind == RelationshipKind.PARENT and parent_hfid not in template_schema.human_friendly_id:
+ if (
+     not isinstance(template_schema, GenericSchema)
+     and relationship.kind == RelationshipKind.PARENT
+     and parent_hfid not in template_schema.human_friendly_id
+ ):
  template_schema.human_friendly_id = [parent_hfid] + template_schema.human_friendly_id
  template_schema.uniqueness_constraints[0].append(relationship.name)

@@ -1992,7 +1996,6 @@ class SchemaBranch:
  include_in_menu=False,
  display_labels=["template_name__value"],
  human_friendly_id=["template_name__value"],
- uniqueness_constraints=[["template_name__value"]],
  attributes=[template_name_attr],
  )

@@ -2011,7 +2014,6 @@ class SchemaBranch:
  human_friendly_id=["template_name__value"],
  uniqueness_constraints=[["template_name__value"]],
  inherit_from=[InfrahubKind.LINEAGESOURCE, InfrahubKind.NODE, core_template_schema.kind],
- default_filter="template_name__value",
  attributes=[template_name_attr],
  relationships=[
  RelationshipSchema(
infrahub/database/validation.py ADDED
@@ -0,0 +1,101 @@
+ from infrahub.database import InfrahubDatabase
+
+
+ async def verify_no_duplicate_relationships(db: InfrahubDatabase) -> None:
+     """
+     Verify that no duplicate active relationships exist at the database level
+     A duplicate is defined as
+     - connecting the same two nodes
+     - having the same identifier
+     - having the same direction (inbound, outbound, bidirectional)
+     - having the same branch
+     A more thorough check that no duplicates exist at any point in time is possible, but more complex
+     """
+     query = """
+     MATCH (a:Node)-[e1:IS_RELATED {status: "active"}]-(rel:Relationship)-[e2:IS_RELATED {branch: e1.branch, status: "active"}]-(b:Node)
+     WHERE a.uuid <> b.uuid
+     AND e1.to IS NULL
+     AND e2.to IS NULL
+     WITH a, rel.name AS rel_name, b, e1.branch AS branch, CASE
+         WHEN startNode(e1) = a AND startNode(e2) = rel THEN "out"
+         WHEN startNode(e1) = rel AND startNode(e2) = b THEN "in"
+         ELSE "bidir"
+     END AS direction, COUNT(*) AS num_duplicates
+     WHERE num_duplicates > 1
+     RETURN a.uuid AS node_id1, b.uuid AS node_id2, rel_name, branch, direction, num_duplicates
+     """
+     results = await db.execute_query(query=query)
+     for result in results:
+         node_id1 = result.get("node_id1")
+         node_id2 = result.get("node_id2")
+         rel_name = result.get("rel_name")
+         branch = result.get("branch")
+         direction = result.get("direction")
+         num_duplicates = result.get("num_duplicates")
+         raise ValueError(
+             f"{num_duplicates} duplicate relationships ({branch=},{direction=}) between nodes '{node_id1}' and '{node_id2}'"
+             f" with relationship name '{rel_name}'"
+         )
+
+
+ async def verify_no_edges_added_after_node_delete(db: InfrahubDatabase) -> None:
+     """
+     Verify that no edges are added to a Node after it is deleted on a given branch
+     """
+     query = """
+     // ------------
+     // find deleted nodes
+     // ------------
+     MATCH (n:Node)-[e:IS_PART_OF]->(:Root)
+     WHERE e.status = "deleted" OR e.to IS NOT NULL
+     WITH DISTINCT n, e.branch AS delete_branch, e.branch_level AS delete_branch_level, CASE
+         WHEN e.status = "deleted" THEN e.from
+         ELSE e.to
+     END AS delete_time
+     // ------------
+     // find the edges added to the deleted node after the delete time
+     // ------------
+     MATCH (n)-[added_e]-(peer)
+     WHERE added_e.from > delete_time
+     AND type(added_e) <> "IS_PART_OF"
+     // if the node was deleted on a branch (delete_branch_level > 1), and then updated on main/global (added_e.branch_level = 1), we can ignore it
+     AND added_e.branch_level >= delete_branch_level
+     AND (added_e.branch = delete_branch OR delete_branch_level = 1)
+     WITH DISTINCT n, delete_branch, delete_time, added_e, peer AS added_peer
+     // ------------
+     // get the branched_from for the branch on which the node was deleted
+     // ------------
+     CALL {
+         WITH added_e
+         MATCH (b:Branch {name: added_e.branch})
+         RETURN b.branched_from AS added_e_branched_from
+     }
+     // ------------
+     // account for the following situations, given that the edge update time is after the node delete time
+     // - deleted on main/global, updated on branch
+     //   - illegal if the delete is before branch.branched_from
+     // - deleted on branch, updated on branch
+     //   - illegal
+     // ------------
+     WITH n, delete_branch, delete_time, added_e, added_peer
+     WHERE delete_branch = added_e.branch
+     OR delete_time < added_e_branched_from
+     RETURN n.uuid AS n_uuid, delete_branch, delete_time, added_e, added_peer
+     """
+     results = await db.execute_query(query=query)
+     error_messages = []
+     for result in results:
+         n_uuid = result.get("n_uuid")
+         delete_branch = result.get("delete_branch")
+         delete_time = result.get("delete_time")
+         added_e = result.get("added_e")
+         added_e_branch = added_e.get("branch")
+         added_e_from = added_e.get("from")
+         added_peer = result.get("added_peer")
+         message = (
+             f"Node {n_uuid} was deleted on {delete_branch} at {delete_time} but has an {added_e.type} edge added on"
+             f" branch {added_e_branch} at {added_e_from} to {added_peer.element_id}"
+         )
+         error_messages.append(message)
+     if error_messages:
+         raise ValueError(error_messages)
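
Note: both helpers above raise ValueError as soon as they find offending data, which makes them easy to wire into a test fixture or a post-migration check. A minimal sketch follows; the import path infrahub.database.validation is an assumption based on the new file listed in the RECORD section below.

from infrahub.database.validation import (
    verify_no_duplicate_relationships,
    verify_no_edges_added_after_node_delete,
)

async def run_graph_validations(db):
    # Each check raises ValueError with details of the offending nodes and edges.
    await verify_no_duplicate_relationships(db=db)
    await verify_no_edges_added_after_node_delete(db=db)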
@@ -533,7 +533,7 @@ class InfrahubMutationMixin:
  except HFIDViolatedError as exc:
  # Only the HFID constraint has been violated, it means the node exists and we can update without rerunning constraints
  if len(exc.matching_nodes_ids) > 1:
- raise RuntimeError(f"Multiple {schema_name} nodes have the same hfid (database corrupted)") from exc
+ raise RuntimeError(f"Multiple {schema_name} nodes have the same hfid") from exc
  node_id = list(exc.matching_nodes_ids)[0]
  node = await NodeManager.get_one(db=db, id=node_id, kind=schema_name, branch=branch, raise_on_error=True)
  updated_obj, mutation = await cls._call_mutate_update(
@@ -26,6 +26,6 @@ async def rebased_branch(message: messages.RefreshRegistryRebasedBranch, service

  async with lock.registry.local_schema_lock():
  service.log.info("Refreshing rebased branch")
- registry.branch[message.branch] = await registry.branch_object.get_by_name(
- name=message.branch, db=service.database
- )
+
+ async with service.database.start_session(read_only=True) as db:
+ registry.branch[message.branch] = await registry.branch_object.get_by_name(name=message.branch, db=db)
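
Note: this change, like the similar ones in proposed_change.py and webhook/tasks.py below, stops handing the raw service.database object to callers and instead opens an explicit, scoped session. A sketch of the pattern, assuming an InfrahubServices instance named service and the usual registry import (the import path is not shown in this diff and is an assumption):

from infrahub.core import registry  # import path assumed

async def load_branch(service, branch_name):
    # Open a short-lived read-only session; it is released when the block exits.
    async with service.database.start_session(read_only=True) as db:
        return await registry.branch_object.get_by_name(name=branch_name, db=db)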
@@ -105,11 +105,11 @@ async def pipeline(message: messages.RequestProposedChangePipeline, service: Inf

  await _gather_repository_repository_diffs(repositories=repositories, service=service)

- async with service.database.start_transaction() as dbt:
- destination_branch = await registry.get_branch(db=dbt, branch=message.destination_branch)
- source_branch = await registry.get_branch(db=dbt, branch=message.source_branch)
+ async with service.database.start_session() as dbs:
+ destination_branch = await registry.get_branch(db=dbs, branch=message.destination_branch)
+ source_branch = await registry.get_branch(db=dbs, branch=message.source_branch)
  component_registry = get_component_registry()
- diff_coordinator = await component_registry.get_component(DiffCoordinator, db=dbt, branch=source_branch)
+ diff_coordinator = await component_registry.get_component(DiffCoordinator, db=dbs, branch=source_branch)
  await diff_coordinator.update_branch_diff(base_branch=destination_branch, diff_branch=source_branch)

  diff_summary = await service.client.get_diff_summary(branch=message.source_branch)
@@ -4,7 +4,7 @@ from .base import PatchQuery

  class DeleteDuplicatedEdgesPatchQuery(PatchQuery):
  """
- Find duplicated or overlapping edges of the same status, type, and branch to update and delete
+ For all Node vertices, find duplicated or overlapping edges of the same status, type, and branch to update and delete
  - one edge will be kept for each pair of nodes and a given status, type, and branch. it will be
  updated to have the earliest "from" and "to" times in this group
  - all the other duplicate/overlapping edges will be deleted
@@ -17,9 +17,9 @@ class DeleteDuplicatedEdgesPatchQuery(PatchQuery):
  async def plan(self) -> PatchPlan:
  query = """
  // ------------
- // Find node pairs that have duplicate edges
+ // Find vertex pairs that have duplicate edges
  // ------------
- MATCH (node_with_dup_edges:Node)-[edge]->(peer)
+ MATCH (node_with_dup_edges:Node)-[edge]-(peer)
  WITH node_with_dup_edges, type(edge) AS edge_type, edge.status AS edge_status, edge.branch AS edge_branch, peer, count(*) AS num_dup_edges
  WHERE num_dup_edges > 1
  WITH DISTINCT node_with_dup_edges, edge_type, edge_branch, peer
@@ -28,12 +28,12 @@ CALL {
  // Get the earliest active and deleted edges for this branch
  // ------------
  WITH node_with_dup_edges, edge_type, edge_branch, peer
- MATCH (node_with_dup_edges)-[active_edge {branch: edge_branch, status: "active"}]->(peer)
+ OPTIONAL MATCH (node_with_dup_edges)-[active_edge {branch: edge_branch, status: "active"}]-(peer)
  WHERE type(active_edge) = edge_type
  WITH node_with_dup_edges, edge_type, edge_branch, peer, active_edge
  ORDER BY active_edge.from ASC
  WITH node_with_dup_edges, edge_type, edge_branch, peer, head(collect(active_edge.from)) AS active_from
- OPTIONAL MATCH (node_with_dup_edges)-[deleted_edge {branch: edge_branch, status: "deleted"}]->(peer)
+ OPTIONAL MATCH (node_with_dup_edges)-[deleted_edge {branch: edge_branch, status: "deleted"}]-(peer)
  WITH node_with_dup_edges, edge_type, edge_branch, peer, active_from, deleted_edge
  ORDER BY deleted_edge.from ASC
  WITH node_with_dup_edges, edge_type, edge_branch, peer, active_from, head(collect(deleted_edge.from)) AS deleted_from
@@ -42,74 +42,85 @@ CALL {
  // ------------
  CALL {
  WITH node_with_dup_edges, edge_type, edge_branch, peer, active_from, deleted_from
- MATCH (node_with_dup_edges)-[active_e {branch: edge_branch, status: "active"}]->(peer)
+ OPTIONAL MATCH (node_with_dup_edges)-[active_e {branch: edge_branch, status: "active"}]-(peer)
  WHERE type(active_e) = edge_type
  WITH node_with_dup_edges, edge_type, edge_branch, peer, active_from, deleted_from, active_e
  ORDER BY %(id_func_name)s(active_e)
  LIMIT 1
  WITH active_e, properties(active_e) AS before_props, {from: active_from, to: deleted_from} AS prop_updates
- RETURN [
- {
- db_id: %(id_func_name)s(active_e), before_props: before_props, prop_updates: prop_updates
- }
- ] AS active_edges_to_update
+ RETURN CASE
+ WHEN active_e IS NOT NULL THEN [
+ {
+ db_id: %(id_func_name)s(active_e), before_props: before_props, prop_updates: prop_updates
+ }
+ ]
+ ELSE []
+ END AS active_edges_to_update
  }
  // ------------
  // Plan deletes for all the other active edges of this type on this branch
  // ------------
  CALL {
  WITH node_with_dup_edges, edge_type, edge_branch, peer
- MATCH (node_with_dup_edges)-[active_e {branch: edge_branch, status: "active"}]->(peer)
+ OPTIONAL MATCH (node_with_dup_edges)-[active_e {branch: edge_branch, status: "active"}]-(peer)
  WHERE type(active_e) = edge_type
  WITH node_with_dup_edges, peer, active_e
  ORDER BY %(id_func_name)s(active_e)
  SKIP 1
- RETURN collect(
- {
+ WITH CASE
+ WHEN active_e IS NOT NULL THEN {
  db_id: %(id_func_name)s(active_e),
- from_id: %(id_func_name)s(node_with_dup_edges),
- to_id: %(id_func_name)s(peer),
+ from_id: %(id_func_name)s(startNode(active_e)),
+ to_id: %(id_func_name)s(endNode(active_e)),
  edge_type: type(active_e),
  before_props: properties(active_e)
  }
- ) AS active_edges_to_delete
+ ELSE NULL
+ END AS serialized_edge
+ RETURN collect(serialized_edge) AS active_edges_to_delete
  }
  // ------------
  // Plan one deleted edge update with correct from time
  // ------------
  CALL {
  WITH node_with_dup_edges, edge_type, edge_branch, peer, deleted_from
- MATCH (node_with_dup_edges)-[deleted_e {branch: edge_branch, status: "deleted"}]->(peer)
+ OPTIONAL MATCH (node_with_dup_edges)-[deleted_e {branch: edge_branch, status: "deleted"}]-(peer)
  WHERE type(deleted_e) = edge_type
  WITH node_with_dup_edges, edge_type, edge_branch, peer, deleted_from, deleted_e
  ORDER BY %(id_func_name)s(deleted_e)
  LIMIT 1
  WITH deleted_e, properties(deleted_e) AS before_props, {from: deleted_from} AS prop_updates
- RETURN [
- {
- db_id: %(id_func_name)s(deleted_e), before_props: before_props, prop_updates: prop_updates
- }
- ] AS deleted_edges_to_update
+ RETURN CASE
+ WHEN deleted_e IS NOT NULL THEN [
+ {
+ db_id: %(id_func_name)s(deleted_e), before_props: before_props, prop_updates: prop_updates
+ }
+ ]
+ ELSE []
+ END AS deleted_edges_to_update
  }
  // ------------
  // Plan deletes for all the other deleted edges of this type on this branch
  // ------------
  CALL {
  WITH node_with_dup_edges, edge_type, edge_branch, peer
- MATCH (node_with_dup_edges)-[deleted_e {branch: edge_branch, status: "deleted"}]->(peer)
+ OPTIONAL MATCH (node_with_dup_edges)-[deleted_e {branch: edge_branch, status: "deleted"}]-(peer)
  WHERE type(deleted_e) = edge_type
  WITH node_with_dup_edges, peer, deleted_e
  ORDER BY %(id_func_name)s(deleted_e)
  SKIP 1
- RETURN collect(
- {
+ WITH CASE
+ WHEN deleted_e IS NOT NULL THEN {
  db_id: %(id_func_name)s(deleted_e),
- from_id: %(id_func_name)s(node_with_dup_edges),
- to_id: %(id_func_name)s(peer),
+ from_id: %(id_func_name)s(startNode(deleted_e)),
+ to_id: %(id_func_name)s(endNode(deleted_e)),
  edge_type: type(deleted_e),
  before_props: properties(deleted_e)
  }
- ) AS deleted_edges_to_delete
+ ELSE NULL
+ END AS serialized_edge
+
+ RETURN collect(serialized_edge) AS deleted_edges_to_delete
  }
  RETURN
  active_edges_to_update + deleted_edges_to_update AS edges_to_update,
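
Note: the CASE/collect() restructuring above is needed because collect() drops NULL values, so an OPTIONAL MATCH that finds nothing now yields an empty list instead of a row full of NULL fields. For orientation only, each entry gathered into active_edges_to_delete / deleted_edges_to_delete is a map shaped roughly like the sketch below; the values are made up and the id fields come from whatever %(id_func_name)s resolves to.

example_edge_to_delete = {
    "db_id": "<database id of the duplicate edge>",
    "from_id": "<database id of startNode(edge)>",
    "to_id": "<database id of endNode(edge)>",
    "edge_type": "IS_RELATED",
    "before_props": {"branch": "main", "status": "active", "from": "2024-01-01T00:00:00.000000Z"},
}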
infrahub/webhook/tasks.py CHANGED
@@ -111,7 +111,8 @@ async def webhook_process(
  async def configure_webhook_all(service: InfrahubServices) -> None:
  log = get_run_logger()

- triggers = await gather_trigger_webhook(db=service.database)
+ async with service.database.start_session(read_only=True) as db:
+ triggers = await gather_trigger_webhook(db=db)

  async with get_client(sync_client=False) as prefect_client:
  await setup_triggers(
@@ -1,8 +1,7 @@
- Metadata-Version: 2.1
+ Metadata-Version: 2.3
  Name: infrahub-server
- Version: 1.2.11
+ Version: 1.2.12
  Summary: Infrahub is taking a new approach to Infrastructure Management by providing a new generation of datastore to organize and control all the data that defines how an infrastructure should run.
- Home-page: https://opsmill.com
  License: AGPL-3.0-only
  Author: OpsMill
  Author-email: info@opsmill.com
@@ -58,6 +57,7 @@ Requires-Dist: ujson (>=5,<6)
  Requires-Dist: uvicorn[standard] (>=0.32,<0.33)
  Requires-Dist: whenever (==0.7.2)
  Project-URL: Documentation, https://docs.infrahub.app/
+ Project-URL: Homepage, https://opsmill.com
  Project-URL: Repository, https://github.com/opsmill/infrahub
  Description-Content-Type: text/markdown

@@ -55,7 +55,7 @@ infrahub/core/changelog/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG
  infrahub/core/changelog/diff.py,sha256=0BxCpsgJ-38x5BBz5XDtAvc9FPy82M0NlzXl8nQ-c70,13752
  infrahub/core/changelog/models.py,sha256=UgfJdOFUkMmjeUKe1mPCO7WE3jNENw0UJU3LWFf20HQ,29920
  infrahub/core/constants/__init__.py,sha256=Q2pEX_-Z1-TE5OM_PWKIPIvAOxuYzk2cWZnFjemkT2k,8618
- infrahub/core/constants/database.py,sha256=lxesWX2z6SZgGok1bAY6_pCBm5rFfu7k4ayMBr6w_Vo,336
+ infrahub/core/constants/database.py,sha256=x5tWaT3e0WfCxxrHMcSoHUBMfcUzStLi133CqHjSosU,368
  infrahub/core/constants/infrahubkind.py,sha256=lrSae8gu4EOXaDHxX1jDkENkfr0fw5t32FP-vdXLrRs,2544
  infrahub/core/constants/relationship_label.py,sha256=AWbWghu5MoAKg2DBE-ysdzSOXnWoWdBn98zpIHzn_co,87
  infrahub/core/constants/schema.py,sha256=uuddQniyGlSlvKjM5mQ_V2VhgZmQ8fUCAHysbZLvTEU,2006
@@ -109,7 +109,7 @@ infrahub/core/diff/query/has_conflicts_query.py,sha256=kt0Z606vP2r1g7OqW2RrYj9Lb
  infrahub/core/diff/query/merge.py,sha256=g16_h5DRc0htTnmuPOujlQEEFpcjVeIAU2li5YrIxfs,33390
  infrahub/core/diff/query/merge_tracking_id.py,sha256=VLGsKuOCIMYe0I-0r01YHF5iaLYIkfSCVQatHM-ybFA,833
  infrahub/core/diff/query/roots_metadata.py,sha256=FT-48amqoR2RS4CkfnnXGI7Z5uOL4hm7IdZiz3SFHRo,2182
- infrahub/core/diff/query/save.py,sha256=j0YuT_V5kZUHIuD6CXT8bcIVv3fewAckqcuukDYn3EA,21972
+ infrahub/core/diff/query/save.py,sha256=fI9UXMLMF38ZyHINyKoD13J6Nlzsmi7gWQ5AW1XYoeI,23424
  infrahub/core/diff/query/summary_counts_enricher.py,sha256=dSmbmbvLx8SE1DyAAw4mbLyW5BWLbMrYOnEchA2uBZc,10239
  infrahub/core/diff/query/time_range_query.py,sha256=0pjsFBur8jcSU6su-iA4IMjnHw3RtNWI787wAPcyepI,3003
  infrahub/core/diff/query/update_conflict_query.py,sha256=kQkFazz88wnApr8UU_qb0ruzhmrhWiqhbErukSAMhLA,1212
@@ -119,7 +119,7 @@ infrahub/core/diff/repository/deserializer.py,sha256=bhN9ao8HxqKyRz273QGLNV9z9_S
  infrahub/core/diff/repository/repository.py,sha256=b54YvGDaFg3FEDNjOTS-WC3Rvtq4bdpDb1NGEKURnqg,25851
  infrahub/core/diff/tasks.py,sha256=7_k-ZNcJZsiDp-xCZvCQfPJjg0xRxpaGTiVVNuRPfBI,3322
  infrahub/core/enums.py,sha256=qGbhRVoH43Xi0iDkUfWdQiKapJbLT9UKsCobFk_paIk,491
- infrahub/core/graph/__init__.py,sha256=Oqh1AEiVhTvWd71qOpyM6Qfxj59fjCI5OQE4mxZgDlM,19
+ infrahub/core/graph/__init__.py,sha256=2EUPjmgYV1tf8ln4lfXK57uUCRFRuanN9SNHZmXdy24,19
  infrahub/core/graph/constraints.py,sha256=lmuzrKDFoeSKRiLtycB9PXi6zhMYghczKrPYvfWyy90,10396
  infrahub/core/graph/index.py,sha256=IHLP-zPRp7HJYLGHMRDRXQp8RC69ztP10Tr5NcL2j4Y,1736
  infrahub/core/graph/schema.py,sha256=FmEPPb1XOFv3nnS_XJCuUqlp8HsStX5A2frHjlhoqvE,10105
@@ -138,7 +138,7 @@ infrahub/core/ipam/utilization.py,sha256=d-zpXCaWsHgJxBLopCDd7y4sJYvHcIzzpYhbTMI
  infrahub/core/manager.py,sha256=G_5A2mYkRkxUnDGjdauVdrEX2ZX5HPaHbxEHxDyOb6U,47467
  infrahub/core/merge.py,sha256=bZvToLKyphJlWMbQAzGuSHcrG2DfeqL69KSfqb1wWdc,10430
  infrahub/core/migrations/__init__.py,sha256=syPb3-Irf11dXCHgbT0UdmTnEBbpf4wXJ3m8ADYXDpk,1175
- infrahub/core/migrations/graph/__init__.py,sha256=510-WYc9fi_DG2H1MgXXcs57QuKDmRyf26Sf87f6qRI,2679
+ infrahub/core/migrations/graph/__init__.py,sha256=e0OzJEAn1qvtPghPwan8kWrLzDXX9eaw7YgbFlAZevA,2872
  infrahub/core/migrations/graph/m001_add_version_to_graph.py,sha256=YcLN6cFjE6IGheXR4Ujb6CcyY8bJ7WE289hcKJaENOc,1515
  infrahub/core/migrations/graph/m002_attribute_is_default.py,sha256=wB6f2N_ChTvGajqHD-OWCG5ahRMDhhXZuwo79ieq_II,1036
  infrahub/core/migrations/graph/m003_relationship_parent_optional.py,sha256=fRMmcOmBdHgOEjlf-5TaWsZ1Rzs6op1s75-r_jE_tZ0,2345
@@ -167,8 +167,10 @@ infrahub/core/migrations/graph/m025_uniqueness_nulls.py,sha256=n_g09PDLs1yo3dMYL
  infrahub/core/migrations/graph/m026_0000_prefix_fix.py,sha256=7sP6nQZrqgzFyRUHKf5fKSX2LrzKEAAsiDsRSu9noJM,1944
  infrahub/core/migrations/graph/m027_delete_isolated_nodes.py,sha256=aAfDUdhsR05CpehVeyLWQ1tRstgrF0HY2V5V6X5ALxM,1589
  infrahub/core/migrations/graph/m028_delete_diffs.py,sha256=PwesD95KTTJsNbMX3NK6O_rGLR7hB-GEi7JIaXheiuQ,1397
+ infrahub/core/migrations/graph/m029_duplicates_cleanup.py,sha256=erCyHFBPCwRcJsSupZ2Be-JdXIbskTeufKxlFREnGAY,28997
+ infrahub/core/migrations/graph/m030_illegal_edges.py,sha256=Wyvn3w4PWw7i-9DyD49C2YeyTGebR7T1sa1HjxOufJU,2804
  infrahub/core/migrations/query/__init__.py,sha256=JoWOUWlV6IzwxWxObsfCnAAKUOHJkE7dZlOsfB64ZEo,876
- infrahub/core/migrations/query/attribute_add.py,sha256=zvOwd9afCtfBpR-rEWePEAnbpoeQorzkcSmD4t8myYA,3510
+ infrahub/core/migrations/query/attribute_add.py,sha256=iWZ6sRwvGLKckRWEFnrLphwQjkDF7r5AcIk9LTaltNc,3670
  infrahub/core/migrations/query/attribute_rename.py,sha256=-p3AInP1dWRO-v-i8MSajDeK5_2LcJwYr2jqLQ_vbgs,6971
  infrahub/core/migrations/query/delete_element_in_schema.py,sha256=F5m_AM_DGprRClKo_QnkYm49xZVvw_zCDIdNO0oM_QU,7051
  infrahub/core/migrations/query/node_duplicate.py,sha256=EbS4rvA5YDiX5uguXjBRrArEGnYGKK-6M3yPvrs4PWw,8232
@@ -268,7 +270,7 @@ infrahub/core/schema/manager.py,sha256=4lPjjtE_MtJ0acJdYAJEkuK4jap3NnTdxB5esEB71
  infrahub/core/schema/node_schema.py,sha256=ld_Wrqf-RsoEUVz_lKE0tcSf5n_oYZYtRI0lTqtd63o,6150
  infrahub/core/schema/profile_schema.py,sha256=cOPSOt5KLgQ0nbqrAN_o33hY_pUtrKmiwSbY_YpVolI,1092
  infrahub/core/schema/relationship_schema.py,sha256=lVbyQKMP2jPZZwZGK6DBvXdXfEQEsQGMbZ2WYxOZKTw,8261
- infrahub/core/schema/schema_branch.py,sha256=v9wZ92qBGJow6p6E3luD-nvge1FvQV5SvOgVj9Mxre0,97870
+ infrahub/core/schema/schema_branch.py,sha256=iEKPbXCbvjT9NOD-iRyYiaSEywy2FLEUt7iJA5VNU5M,97863
  infrahub/core/schema/schema_branch_computed.py,sha256=14UUsQJDLMHkYhg7QMqeLiTF3PO8c8rGa90ul3F2ZZo,10629
  infrahub/core/schema/template_schema.py,sha256=O-PBS9IRM4JX6PxeoyZKwqZ0u0SdQ2zxWMc01PJ2_EA,1084
  infrahub/core/task/__init__.py,sha256=Ied1NvKGJUDmff27z_-yWW8ArenHxGvSvQTaQyx1iHs,128
@@ -317,6 +319,7 @@ infrahub/database/index.py,sha256=ATLqw9Grqbq7haGGm14VSEPmcPniid--YATiffo4sA0,16
  infrahub/database/memgraph.py,sha256=Fg3xHP9s0MiBBmMvcEmsJvuIUSq8U_XCS362HDE9d1s,1742
  infrahub/database/metrics.py,sha256=xU4OSKFbsxcw_yZlt_39PmGtF7S7yPbPuOIlSCu5sI0,739
  infrahub/database/neo4j.py,sha256=ou7PGE9rbcVD4keBEFCDFm30MEexnijbZOo3kqrfW3k,2337
+ infrahub/database/validation.py,sha256=lVB9qCCEHd0pEUylD-MmOHXyhRLg-b4wyIEhfAENFtk,4153
  infrahub/dependencies/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  infrahub/dependencies/builder/constraint/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  infrahub/dependencies/builder/constraint/grouped/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -441,7 +444,7 @@ infrahub/graphql/mutations/diff_conflict.py,sha256=JngQfyKXCVlmtlqQ_VyabmrOEDOEK
  infrahub/graphql/mutations/generator.py,sha256=Ulw4whZm8Gc8lJjwfUFoFSsR0cOUliFKl87Oca4B9O0,3579
  infrahub/graphql/mutations/graphql_query.py,sha256=mp_O2byChneCihUrEAFEiIAgJ1gW9WrgtwPetUQmkJw,3562
  infrahub/graphql/mutations/ipam.py,sha256=wIN8OcTNCHVy32YgatWZi2Of-snFYBd4wlxOAJvE-AY,15961
- infrahub/graphql/mutations/main.py,sha256=EgO0U8U0FmgwuTrDrKuafSOWjO7pNR5YZzYYZbQMfec,26611
+ infrahub/graphql/mutations/main.py,sha256=LxzNZQeSJ-03t6MZaVrw0bvwI5NlcGSeZGVdyqsuKbE,26590
  infrahub/graphql/mutations/menu.py,sha256=u2UbOA-TFDRcZRGFkgYTmpGxN2IAUgOvJXd7SnsufyI,3708
  infrahub/graphql/mutations/models.py,sha256=ilkSLr8OxVO9v3Ra_uDyUTJT9qPOmdPMqQbuWIydJMo,264
  infrahub/graphql/mutations/node_getter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -533,10 +536,10 @@ infrahub/message_bus/operations/git/__init__.py,sha256=0Fbz1AnU8lWKdX7PS_b0BvjiK
  infrahub/message_bus/operations/git/file.py,sha256=uW1dXVCMrQNC5-DFrVJ6PvfU47CQ2CQJec-bOXoBkjc,1472
  infrahub/message_bus/operations/git/repository.py,sha256=eQ6csfEoCbyOTAgoA5mzHerW8-TlbrQaZQ4Htj6qZu8,2432
  infrahub/message_bus/operations/refresh/__init__.py,sha256=vBuvTL4zRRpOMXATmckQ3bx2GnNwhxicFECA8-8ZZXk,47
- infrahub/message_bus/operations/refresh/registry.py,sha256=dfLLISZ8qxwxFbGD5CaNguTicZBmACPMa82q6lAVXsM,1315
+ infrahub/message_bus/operations/refresh/registry.py,sha256=JOR-71kiSgEpbEeLJAxcNv7JoV5ORahV_Gr6ytKgZSQ,1357
  infrahub/message_bus/operations/requests/__init__.py,sha256=7BWa2wc4XSNk13zySOEUdFfcaldSIZT6WXdR6eDxk-U,131
  infrahub/message_bus/operations/requests/generator_definition.py,sha256=AE2x0NiGoyqD5PYp7XmmjzD23SqNCTyzI8KwcTcVurg,6093
- infrahub/message_bus/operations/requests/proposed_change.py,sha256=fILCF3QnsU2c8UAhDyYBjXbfFvj0QF6xbF37A7lLUPM,23044
+ infrahub/message_bus/operations/requests/proposed_change.py,sha256=CFxtcoYdmsdt9lKebeD407wEJB-Qo_uR8BB-t3wp-Kk,23040
  infrahub/message_bus/operations/send/__init__.py,sha256=ivuUTAknLiWfArR44SxA40l0UKVkdHjtDIx0mg06IcE,39
  infrahub/message_bus/operations/send/echo.py,sha256=m2z_ij7Bucl8u1E1rLAfL3fsrhKZhk_vNIvLqNErIEI,652
  infrahub/message_bus/types.py,sha256=INOsBhOsPnTSB_6SvMWw1BrnRJZyDgG2c601IjSidgs,4418
@@ -552,8 +555,7 @@ infrahub/patch/plan_reader.py,sha256=uqHNYVBBkpmVIGwaxl2tlMNJd2tPVedNZoSmFSjTdow
  infrahub/patch/plan_writer.py,sha256=x2u5Oe3ME3zXTdkz5hRnvp2EaQwt-r4LyuSATc2LkKs,4822
  infrahub/patch/queries/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  infrahub/patch/queries/base.py,sha256=wXtv4I-7l_u0kXJFbmFZZJ0_2_5yvuAJwmwiLqRq7AY,338
- infrahub/patch/queries/consolidate_duplicated_nodes.py,sha256=R4n6XmACm_wJPd44ZD26VApCavTYtS2QblRvSIeJkDM,3961
- infrahub/patch/queries/delete_duplicated_edges.py,sha256=Td5LeNFASt8JH49ufaCZFHT3G8AocvYSw-ZfndqwP1A,6302
+ infrahub/patch/queries/delete_duplicated_edges.py,sha256=GjmdbkRTtZzsM2Pr53nm4AHM5MdI6oxhdaumZDcgUKU,6804
  infrahub/patch/runner.py,sha256=ZB4aOqlG77hJNtDyQtIXmi-2WgM07WSEFtWV2NItIqk,12594
  infrahub/patch/vertex_adder.py,sha256=lhWELYWlHwkopGOECSHRfj1mb0-COheibsu95r2Hwzs,2796
  infrahub/patch/vertex_deleter.py,sha256=czdb8T30k_-WSbcZUVS2-DvaN3Dp4j9ss2lAz8KN0mo,1302
@@ -644,7 +646,7 @@ infrahub/visuals.py,sha256=N62G4oOOIYNFpvMjKq7uos-oZAZybGMp57uh5jsVX9w,627
  infrahub/webhook/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  infrahub/webhook/gather.py,sha256=XNaIGiHDiMjjtSjsVbswOze7cLaL0MKJmvSbZBS-WT0,691
  infrahub/webhook/models.py,sha256=c_6Ng8BeoyUDb4TZInVruB7e8Faq_QQ8-00IVcEBwJM,10031
- infrahub/webhook/tasks.py,sha256=kQz0BzOOKUGogHKN2X_tSKYk-7rpHMQ1FKjmGugzEc0,7235
+ infrahub/webhook/tasks.py,sha256=O1M60KuxXO_eDHSXMo-Y3w_F58aRQ34F6UlPPYApTfI,7294
  infrahub/webhook/triggers.py,sha256=v1dzFV4wX0GO2n5hft_qzp-oJOA2P_9Q2eTcSP-i0pk,1574
  infrahub/worker.py,sha256=JtTM-temURUbpEy-bkKJuTt-GKoiHFDrOe9SyVTIXEM,49
  infrahub/workers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -761,18 +763,19 @@ infrahub_sdk/uuidt.py,sha256=Tz-4nHkJwbi39UT3gaIe2wJeZNAoBqf6tm3sw7LZbXc,2155
  infrahub_sdk/yaml.py,sha256=512OKgxAYPt4QLBFlucUB4GgwKJ09dzgC86pO57YFFw,5018
  infrahub_testcontainers/__init__.py,sha256=oPpmesGgYBSdKTg1L37FGwYBeao1EHury5SJGul-CT8,216
  infrahub_testcontainers/constants.py,sha256=mZ4hLvcf4rKk9wC7EId4MQxAY0sk4V99deB04N0J2bg,85
- infrahub_testcontainers/container.py,sha256=-NccmHKJw8rnGY4nSgqIJdGBrX8eObi9kq7q7mQz1zs,12308
- infrahub_testcontainers/docker-compose.test.yml,sha256=dePNK3r5DWVysrFr-t838-9LercZQOIJ8ZYOBWv7Mok,8482
+ infrahub_testcontainers/container.py,sha256=CpRLoCQjN37Iv9DpMQX2WHgsDObT7_iNfE6OTWL90OE,19559
+ infrahub_testcontainers/docker-compose-cluster.test.yml,sha256=noKd7NLr6GQOn1X4qukvpsRvg15unGS7BfPDG4xkKdM,12057
+ infrahub_testcontainers/docker-compose.test.yml,sha256=NgQF0un3LNUWhY0-uKb56Ky9ZmCJP0dRssUPx4qZp-o,8562
  infrahub_testcontainers/haproxy.cfg,sha256=QUkG2Xu-hKoknPOeYKAkBT_xJH6U9CfIS0DTMFZJsnk,1305
- infrahub_testcontainers/helpers.py,sha256=zsvBOql5qM2OX1ybPcklqF-nzWYHkZI3Gk3KZhxWOtU,3578
+ infrahub_testcontainers/helpers.py,sha256=a6qRHjeFZetkq0zQj5pgz021AI7XhArMG3ze7ZQuEHk,4288
  infrahub_testcontainers/host.py,sha256=Z4_gGoGKKeM_HGVS7SdYL1FTNGyLBk8wzicdSKHpfmM,1486
  infrahub_testcontainers/measurements.py,sha256=gR-uTasSIFCXrwvnNpIpfsQIopKftT7pBiarCgIShaQ,2214
  infrahub_testcontainers/models.py,sha256=ASYyvl7d_WQz_i7y8-3iab9hwwmCl3OCJavqVbe8nXU,954
  infrahub_testcontainers/performance_test.py,sha256=hvwiy6tc_lWniYqGkqfOXVGAmA_IV15VOZqbiD9ezno,6149
- infrahub_testcontainers/plugin.py,sha256=g24SMg4EAqVe2N8i9F66EV34cNqIdDU4mRP7OeOJO1w,5381
+ infrahub_testcontainers/plugin.py,sha256=I3RuZQ0dARyKHuqCf0y1Yj731P2Mwf3BJUehRJKeWrs,5645
  infrahub_testcontainers/prometheus.yml,sha256=610xQEyj3xuVJMzPkC4m1fRnCrjGpiRBrXA2ytCLa54,599
- infrahub_server-1.2.11.dist-info/LICENSE.txt,sha256=TfPDBt3ar0uv_f9cqCDMZ5rIzW3CY8anRRd4PkL6ejs,34522
- infrahub_server-1.2.11.dist-info/METADATA,sha256=Yg5ituzKXHWEo3K_-WUf45_fpZU5oVRm8HG6L13B1vE,8194
- infrahub_server-1.2.11.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
- infrahub_server-1.2.11.dist-info/entry_points.txt,sha256=UXIeFWDsrV-4IllNvUEd6KieYGzQfn9paga2YyABOQI,393
- infrahub_server-1.2.11.dist-info/RECORD,,
+ infrahub_server-1.2.12.dist-info/LICENSE.txt,sha256=TfPDBt3ar0uv_f9cqCDMZ5rIzW3CY8anRRd4PkL6ejs,34522
+ infrahub_server-1.2.12.dist-info/METADATA,sha256=LB7mZe_ufD6CAEphVR-SojpySkkZd75NxwQ1n3AOVPw,8206
+ infrahub_server-1.2.12.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+ infrahub_server-1.2.12.dist-info/entry_points.txt,sha256=UXIeFWDsrV-4IllNvUEd6KieYGzQfn9paga2YyABOQI,393
+ infrahub_server-1.2.12.dist-info/RECORD,,
@@ -1,4 +1,4 @@
  Wheel-Version: 1.0
- Generator: poetry-core 1.9.1
+ Generator: poetry-core 2.1.3
  Root-Is-Purelib: true
  Tag: py3-none-any