infrahub-server 1.2.10__py3-none-any.whl → 1.2.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. infrahub/config.py +9 -0
  2. infrahub/core/constants/database.py +1 -0
  3. infrahub/core/constants/infrahubkind.py +1 -0
  4. infrahub/core/constraint/node/runner.py +1 -1
  5. infrahub/core/diff/query/save.py +75 -45
  6. infrahub/core/diff/query_parser.py +5 -1
  7. infrahub/core/diff/tasks.py +3 -3
  8. infrahub/core/graph/__init__.py +1 -1
  9. infrahub/core/migrations/graph/__init__.py +6 -0
  10. infrahub/core/migrations/graph/m029_duplicates_cleanup.py +680 -0
  11. infrahub/core/migrations/graph/m030_illegal_edges.py +83 -0
  12. infrahub/core/migrations/query/attribute_add.py +13 -9
  13. infrahub/core/node/resource_manager/ip_address_pool.py +6 -2
  14. infrahub/core/node/resource_manager/ip_prefix_pool.py +6 -2
  15. infrahub/core/protocols.py +4 -0
  16. infrahub/core/query/diff.py +7 -0
  17. infrahub/core/schema/definitions/core/__init__.py +8 -1
  18. infrahub/core/schema/definitions/core/resource_pool.py +20 -0
  19. infrahub/core/schema/schema_branch.py +5 -3
  20. infrahub/core/validators/tasks.py +1 -1
  21. infrahub/database/__init__.py +5 -4
  22. infrahub/database/validation.py +101 -0
  23. infrahub/graphql/app.py +1 -1
  24. infrahub/graphql/loaders/node.py +1 -1
  25. infrahub/graphql/loaders/peers.py +1 -1
  26. infrahub/graphql/mutations/main.py +1 -1
  27. infrahub/graphql/mutations/proposed_change.py +1 -1
  28. infrahub/graphql/queries/relationship.py +1 -1
  29. infrahub/graphql/queries/task.py +10 -0
  30. infrahub/graphql/resolvers/many_relationship.py +4 -4
  31. infrahub/graphql/resolvers/resolver.py +4 -4
  32. infrahub/graphql/resolvers/single_relationship.py +2 -2
  33. infrahub/graphql/subscription/graphql_query.py +2 -2
  34. infrahub/graphql/types/branch.py +1 -1
  35. infrahub/graphql/types/task_log.py +3 -2
  36. infrahub/message_bus/operations/refresh/registry.py +4 -4
  37. infrahub/message_bus/operations/requests/proposed_change.py +4 -4
  38. infrahub/patch/queries/delete_duplicated_edges.py +40 -29
  39. infrahub/task_manager/task.py +44 -4
  40. infrahub/telemetry/database.py +1 -1
  41. infrahub/telemetry/tasks.py +1 -1
  42. infrahub/webhook/tasks.py +2 -1
  43. {infrahub_server-1.2.10.dist-info → infrahub_server-1.2.12.dist-info}/METADATA +3 -3
  44. {infrahub_server-1.2.10.dist-info → infrahub_server-1.2.12.dist-info}/RECORD +52 -49
  45. {infrahub_server-1.2.10.dist-info → infrahub_server-1.2.12.dist-info}/WHEEL +1 -1
  46. infrahub_testcontainers/container.py +239 -64
  47. infrahub_testcontainers/docker-compose-cluster.test.yml +321 -0
  48. infrahub_testcontainers/docker-compose.test.yml +1 -0
  49. infrahub_testcontainers/helpers.py +15 -1
  50. infrahub_testcontainers/plugin.py +9 -0
  51. infrahub/patch/queries/consolidate_duplicated_nodes.py +0 -109
  52. {infrahub_server-1.2.10.dist-info → infrahub_server-1.2.12.dist-info}/LICENSE.txt +0 -0
  53. {infrahub_server-1.2.10.dist-info → infrahub_server-1.2.12.dist-info}/entry_points.txt +0 -0
infrahub/core/migrations/graph/m030_illegal_edges.py ADDED
@@ -0,0 +1,83 @@
+ from __future__ import annotations
+
+ from typing import TYPE_CHECKING, Any, Sequence
+
+ from infrahub.core.migrations.shared import GraphMigration, MigrationResult
+ from infrahub.log import get_logger
+
+ from ...query import Query, QueryType
+
+ if TYPE_CHECKING:
+     from infrahub.database import InfrahubDatabase
+
+ log = get_logger()
+
+
+ class DeletePosthumousEdges(Query):
+     name = "delete_posthumous_edges_query"
+     type = QueryType.WRITE
+     insert_return = False
+
+     async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:  # noqa: ARG002
+         query = """
+         // ------------
+         // find deleted nodes
+         // ------------
+         MATCH (n:Node)-[e:IS_PART_OF]->(:Root)
+         WHERE e.status = "deleted" OR e.to IS NOT NULL
+         WITH DISTINCT n, e.branch AS delete_branch, e.branch_level AS delete_branch_level, CASE
+             WHEN e.status = "deleted" THEN e.from
+             ELSE e.to
+         END AS delete_time
+         // ------------
+         // find the edges added to the deleted node after the delete time
+         // ------------
+         MATCH (n)-[added_e]-(peer)
+         WHERE added_e.from > delete_time
+         AND type(added_e) <> "IS_PART_OF"
+         // if the node was deleted on a branch (delete_branch_level > 1), and then updated on main/global (added_e.branch_level = 1), we can ignore it
+         AND added_e.branch_level >= delete_branch_level
+         AND (added_e.branch = delete_branch OR delete_branch_level = 1)
+         WITH DISTINCT n, delete_branch, delete_time, added_e, peer
+         // ------------
+         // get the branched_from for the branch on which the edge was added
+         // ------------
+         CALL {
+             WITH added_e
+             MATCH (b:Branch {name: added_e.branch})
+             RETURN b.branched_from AS added_e_branched_from
+         }
+         // ------------
+         // account for the following situations, given that the edge update time is after the node delete time
+         // - deleted on main/global, updated on branch
+         //   - illegal if the delete is before branch.branched_from
+         // - deleted on branch, updated on branch
+         //   - illegal
+         // ------------
+         WITH n, delete_branch, delete_time, added_e, peer
+         WHERE delete_branch = added_e.branch
+         OR delete_time < added_e_branched_from
+         DELETE added_e
+         // --------------
+         // the peer _should_ only be an Attribute, but I want to make sure we don't
+         // inadvertently delete Root or an AttributeValue or a Boolean
+         // --------------
+         WITH peer
+         WHERE "Attribute" IN labels(peer)
+         DETACH DELETE peer
+         """
+         self.add_to_query(query)
+
+
+ class Migration030(GraphMigration):
+     """
+     Edges could have been added to Nodes after the Node was deleted, so we need to hard-delete those illegal edges
+     """
+
+     name: str = "030_delete_illegal_edges"
+     minimum_version: int = 29
+     queries: Sequence[type[Query]] = [DeletePosthumousEdges]
+
+     async def validate_migration(self, db: InfrahubDatabase) -> MigrationResult:  # noqa: ARG002
+         result = MigrationResult()
+         return result
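The branch arithmetic in this query is dense. The WHERE clauses amount to the predicate below, a minimal plain-Python sketch (the scalar parameters are hypothetical stand-ins for the edge and branch properties used in the query; timestamps are assumed to be ISO-8601 strings, which compare lexicographically):

    def is_posthumous(
        delete_branch: str,
        delete_branch_level: int,
        delete_time: str,
        edge_branch: str,
        edge_branch_level: int,
        edge_from: str,
        edge_branched_from: str,
    ) -> bool:
        """Mirror of the WHERE clauses in DeletePosthumousEdges (illustration only)."""
        if edge_from <= delete_time:
            return False  # the edge predates the delete, nothing illegal
        if edge_branch_level < delete_branch_level:
            return False  # deleted on a branch, later touched on main/global: legal
        if edge_branch != delete_branch and delete_branch_level != 1:
            return False  # activity on an unrelated branch is not considered
        # same branch as the delete: always illegal; deleted on main/global:
        # illegal only if the delete happened before the updating branch was created
        return edge_branch == delete_branch or delete_time < edge_branched_from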
infrahub/core/migrations/query/attribute_add.py CHANGED
@@ -63,28 +63,32 @@ class AttributeAddQuery(Query):
      MATCH p = (n:%(node_kind)s)
      CALL {
          WITH n
-         MATCH (root:Root)<-[r1:IS_PART_OF]-(n)
-         OPTIONAL MATCH (n)-[r2:HAS_ATTRIBUTE]-(:Attribute { name: $attr_name })
-         WHERE all(r in [r1, r2] WHERE (%(branch_filter)s))
-         RETURN n as n1, r1 as r11, r2 as r12
-         ORDER BY r2.branch_level DESC, r2.from ASC, r1.branch_level DESC, r1.from ASC
+         MATCH (:Root)<-[r:IS_PART_OF]-(n)
+         WHERE %(branch_filter)s
+         WITH n, r AS is_part_of_e
+         OPTIONAL MATCH (n)-[r:HAS_ATTRIBUTE]-(:Attribute { name: $attr_name })
+         WHERE %(branch_filter)s
+         WITH is_part_of_e, r AS has_attr_e
+         RETURN is_part_of_e, has_attr_e
+         ORDER BY has_attr_e.branch_level DESC, has_attr_e.from ASC, is_part_of_e.branch_level DESC, is_part_of_e.from ASC
          LIMIT 1
      }
-     WITH n1 as n, r11 as r1, r12 as r2, av, is_protected_value, is_visible_value
-     WHERE r1.status = "active" AND (r2 IS NULL OR r2.status = "deleted")
+     WITH n, is_part_of_e, has_attr_e, av, is_protected_value, is_visible_value
+     WHERE is_part_of_e.status = "active" AND (has_attr_e IS NULL OR has_attr_e.status = "deleted")
      CREATE (a:Attribute { name: $attr_name, branch_support: $branch_support })
      CREATE (n)-[:HAS_ATTRIBUTE $rel_props ]->(a)
      CREATE (a)-[:HAS_VALUE $rel_props ]->(av)
      CREATE (a)-[:IS_PROTECTED $rel_props]->(is_protected_value)
      CREATE (a)-[:IS_VISIBLE $rel_props]->(is_visible_value)
      %(uuid_generation)s
-     FOREACH (i in CASE WHEN r2.status = "deleted" THEN [1] ELSE [] END |
-         SET r2.to = $current_time
+     FOREACH (i in CASE WHEN has_attr_e.status = "deleted" THEN [1] ELSE [] END |
+         SET has_attr_e.to = $current_time
      )
      """ % {
          "branch_filter": branch_filter,
          "node_kind": self.node_kind,
          "uuid_generation": db.render_uuid_generation(node_label="a", node_attr="uuid"),
      }
+
      self.add_to_query(query)
      self.return_labels = ["n.uuid", "a.uuid"]
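The rewritten subquery applies the branch filter to each edge type separately instead of with all(), then keeps one winner per node. The ORDER BY prefers the edge with the higher branch_level (more branch-specific) and, within that, the earlier from time, as in this in-memory sketch (hypothetical edge dicts):

    edges = [
        {"branch_level": 1, "from": "2024-01-01", "status": "active"},   # main
        {"branch_level": 2, "from": "2024-02-01", "status": "deleted"},  # branch
    ]
    # branch_level DESC, from ASC: the most branch-specific edge wins
    best = sorted(edges, key=lambda e: (-e["branch_level"], e["from"]))[0]
    print(best["status"])  # "deleted", so the migration recreates the attribute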
infrahub/core/node/resource_manager/ip_address_pool.py CHANGED
@@ -81,11 +81,15 @@ class CoreIPAddressPool(Node):
          return node
 
      async def get_next(self, db: InfrahubDatabase, prefixlen: int | None = None) -> IPAddressType:
-         # Measure utilization of all prefixes identified as resources
          resources = await self.resources.get_peers(db=db)  # type: ignore[attr-defined]
          ip_namespace = await self.ip_namespace.get_peer(db=db)  # type: ignore[attr-defined]
 
-         for resource in resources.values():
+         try:
+             weighted_resources = sorted(resources.values(), key=lambda r: r.allocation_weight.value or 0, reverse=True)
+         except AttributeError:
+             weighted_resources = list(resources.values())
+
+         for resource in weighted_resources:
              ip_prefix = ipaddress.ip_network(resource.prefix.value)  # type: ignore[attr-defined]
              prefix_length = prefixlen or ip_prefix.prefixlen
infrahub/core/node/resource_manager/ip_prefix_pool.py CHANGED
@@ -88,11 +88,15 @@ class CoreIPPrefixPool(Node):
          return node
 
      async def get_next(self, db: InfrahubDatabase, prefixlen: int) -> IPNetworkType:
-         # Measure utilization of all prefixes identified as resources
          resources = await self.resources.get_peers(db=db)  # type: ignore[attr-defined]
          ip_namespace = await self.ip_namespace.get_peer(db=db)  # type: ignore[attr-defined]
 
-         for resource in resources.values():
+         try:
+             weighted_resources = sorted(resources.values(), key=lambda r: r.allocation_weight.value or 0, reverse=True)
+         except AttributeError:
+             weighted_resources = list(resources.values())
+
+         for resource in weighted_resources:
              subnets = await get_subnets(
                  db=db,
                  ip_prefix=ipaddress.ip_network(resource.prefix.value),  # type: ignore[attr-defined]
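Both pools now sort candidate resources by the new allocation_weight attribute before allocating, and fall back to the original unordered behaviour when a resource kind does not expose the attribute. A self-contained sketch of the two code paths (the stub classes are hypothetical stand-ins for pool resources):

    class Weight:
        def __init__(self, value):
            self.value = value

    class Resource:
        def __init__(self, name, weight=None, weighted=True):
            self.name = name
            if weighted:
                self.allocation_weight = Weight(weight)

    resources = {"a": Resource("low", 10), "b": Resource("high", 100), "c": Resource("unset")}
    try:
        ordered = sorted(resources.values(), key=lambda r: r.allocation_weight.value or 0, reverse=True)
    except AttributeError:
        # at least one resource kind lacks the attribute: keep the original ordering
        ordered = list(resources.values())
    print([r.name for r in ordered])  # ['high', 'low', 'unset']: an unset weight counts as 0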
infrahub/core/protocols.py CHANGED
@@ -209,6 +209,10 @@ class CoreWebhook(CoreNode):
      validate_certificates: BooleanOptional
 
 
+ class CoreWeightedPoolResource(CoreNode):
+     allocation_weight: IntegerOptional
+
+
  class LineageOwner(CoreNode):
      pass
 
infrahub/core/query/diff.py CHANGED
@@ -201,6 +201,13 @@ WITH reduce(
      diff_rel_paths = [], item IN [penultimate_path, peer_path] |
      CASE WHEN item IS NULL THEN diff_rel_paths ELSE diff_rel_paths + [item] END
  ) AS diff_rel_paths, has_more_data
+ // ------------------------
+ // make sure we still include has_more_data if diff_rel_paths is empty
+ // ------------------------
+ WITH CASE
+     WHEN diff_rel_paths = [] THEN [NULL]
+     ELSE diff_rel_paths
+ END AS diff_rel_paths, has_more_data
  """
 
  def get_previous_base_path_query(self, db: InfrahubDatabase) -> str:
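The new guard matters because an empty list produces zero rows once unwound, which would silently drop the has_more_data flag; a [NULL] placeholder keeps exactly one row. A Python analogy, assuming diff_rel_paths is consumed by an UNWIND later in the query:

    def unwind(rows):
        # Cypher's UNWIND: one output row per list element, zero rows for an empty list
        return [(item, extra) for (items, extra) in rows for item in items]

    print(unwind([([], True)]))      # [] : the True flag is lost with an empty list
    print(unwind([([None], True)]))  # [(None, True)] : the placeholder keeps the row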
infrahub/core/schema/definitions/core/__init__.py CHANGED
@@ -56,7 +56,13 @@ from .propose_change_validator (
      core_user_validator,
  )
  from .repository import core_generic_repository, core_read_only_repository, core_repository
- from .resource_pool import core_ip_address_pool, core_ip_prefix_pool, core_number_pool, core_resource_pool
+ from .resource_pool import (
+     core_ip_address_pool,
+     core_ip_prefix_pool,
+     core_number_pool,
+     core_resource_pool,
+     core_weighted_pool_resource,
+ )
  from .template import core_object_component_template, core_object_template
  from .transform import core_transform, core_transform_jinja2, core_transform_python
  from .webhook import core_custom_webhook, core_standard_webhook, core_webhook
@@ -81,6 +87,7 @@ core_models_mixed: dict[str, list] = {
      builtin_ip_prefix,
      builtin_ip_address,
      core_resource_pool,
+     core_weighted_pool_resource,
      core_generic_account,
      core_base_permission,
      core_credential,
infrahub/core/schema/definitions/core/resource_pool.py CHANGED
@@ -32,6 +32,26 @@ core_resource_pool = GenericSchema(
      ],
  )
 
+ core_weighted_pool_resource = GenericSchema(
+     name="WeightedPoolResource",
+     namespace="Core",
+     label="Weighted Pool Resource",
+     description="Resource to be used in a pool, its weight is used to determine its priority on allocation.",
+     include_in_menu=False,
+     branch=BranchSupportType.AWARE,
+     generate_profile=False,
+     attributes=[
+         Attr(
+             name="allocation_weight",
+             label="Weight",
+             description="Weight determines allocation priority, resources with higher values are selected first.",
+             kind="Number",
+             optional=True,
+             order_weight=10000,
+         )
+     ],
+ )
+
  core_ip_prefix_pool = NodeSchema(
      name="IPPrefixPool",
      namespace="Core",
infrahub/core/schema/schema_branch.py CHANGED
@@ -1957,7 +1957,11 @@ class SchemaBranch:
          )
 
          parent_hfid = f"{relationship.name}__template_name__value"
-         if relationship.kind == RelationshipKind.PARENT and parent_hfid not in template_schema.human_friendly_id:
+         if (
+             not isinstance(template_schema, GenericSchema)
+             and relationship.kind == RelationshipKind.PARENT
+             and parent_hfid not in template_schema.human_friendly_id
+         ):
              template_schema.human_friendly_id = [parent_hfid] + template_schema.human_friendly_id
              template_schema.uniqueness_constraints[0].append(relationship.name)
 
@@ -1992,7 +1996,6 @@ class SchemaBranch:
              include_in_menu=False,
              display_labels=["template_name__value"],
              human_friendly_id=["template_name__value"],
-             uniqueness_constraints=[["template_name__value"]],
              attributes=[template_name_attr],
          )
 
@@ -2011,7 +2014,6 @@ class SchemaBranch:
              human_friendly_id=["template_name__value"],
              uniqueness_constraints=[["template_name__value"]],
              inherit_from=[InfrahubKind.LINEAGESOURCE, InfrahubKind.NODE, core_template_schema.kind],
-             default_filter="template_name__value",
              attributes=[template_name_attr],
              relationships=[
                  RelationshipSchema(
infrahub/core/validators/tasks.py CHANGED
@@ -71,7 +71,7 @@ async def schema_path_validate(
      schema_branch: SchemaBranch,
      service: InfrahubServices,
  ) -> SchemaValidatorPathResponseData:
-     async with service.database.start_session() as db:
+     async with service.database.start_session(read_only=True) as db:
          constraint_request = SchemaConstraintValidatorRequest(
              branch=branch,
              constraint_name=constraint_name,
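This is the first of many call sites in this release that switch to a read-only session; the same one-line change is applied across the GraphQL app, loaders, resolvers, and types below. Presumably the flag opens the underlying Neo4j session with read access mode so that, in a cluster deployment, these queries can be routed to read replicas. A sketch of the pattern, reusing the execute_query API shown in infrahub/database/validation.py below:

    from infrahub.database import InfrahubDatabase

    async def count_nodes(db: InfrahubDatabase) -> int:
        # read-only work opens a read session; writes keep the default session
        async with db.start_session(read_only=True) as dbs:
            results = await dbs.execute_query(query="MATCH (n:Node) RETURN count(n) AS num")
            return results[0].get("num")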
infrahub/database/__init__.py CHANGED
@@ -476,8 +476,6 @@ async def validate_database(
 
 
  async def get_db(retry: int = 0) -> AsyncDriver:
-     URI = f"{config.SETTINGS.database.protocol}://{config.SETTINGS.database.address}:{config.SETTINGS.database.port}"
-
      trusted_certificates = TrustSystemCAs()
      if config.SETTINGS.database.tls_insecure:
          trusted_certificates = TrustAll()
@@ -485,11 +483,14 @@ async def get_db(retry: int = 0) -> AsyncDriver:
          trusted_certificates = TrustCustomCAs(config.SETTINGS.database.tls_ca_file)
 
      driver = AsyncGraphDatabase.driver(
-         URI,
+         config.SETTINGS.database.database_uri,
          auth=(config.SETTINGS.database.username, config.SETTINGS.database.password),
          encrypted=config.SETTINGS.database.tls_enabled,
          trusted_certificates=trusted_certificates,
-         notifications_disabled_categories=[NotificationDisabledCategory.UNRECOGNIZED],
+         notifications_disabled_categories=[
+             NotificationDisabledCategory.UNRECOGNIZED,
+             NotificationDisabledCategory.DEPRECATION,  # TODO: Remove me with 1.3
+         ],
          notifications_min_severity=NotificationMinimumSeverity.WARNING,
      )
 
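The inline URI assembly moves behind a database_uri setting (infrahub/config.py also changed in this release, which is presumably where the property now lives). Based on the removed f-string, the computed value is likely equivalent to this sketch (the settings class here is a hypothetical stand-in):

    from pydantic import BaseModel

    class DatabaseSettings(BaseModel):
        protocol: str = "bolt"
        address: str = "localhost"
        port: int = 7687

        @property
        def database_uri(self) -> str:
            # same composition as the removed inline URI
            return f"{self.protocol}://{self.address}:{self.port}"

    print(DatabaseSettings().database_uri)  # bolt://localhost:7687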
infrahub/database/validation.py ADDED
@@ -0,0 +1,101 @@
+ from infrahub.database import InfrahubDatabase
+
+
+ async def verify_no_duplicate_relationships(db: InfrahubDatabase) -> None:
+     """
+     Verify that no duplicate active relationships exist at the database level
+     A duplicate is defined as
+     - connecting the same two nodes
+     - having the same identifier
+     - having the same direction (inbound, outbound, bidirectional)
+     - having the same branch
+     A more thorough check that no duplicates exist at any point in time is possible, but more complex
+     """
+     query = """
+     MATCH (a:Node)-[e1:IS_RELATED {status: "active"}]-(rel:Relationship)-[e2:IS_RELATED {branch: e1.branch, status: "active"}]-(b:Node)
+     WHERE a.uuid <> b.uuid
+     AND e1.to IS NULL
+     AND e2.to IS NULL
+     WITH a, rel.name AS rel_name, b, e1.branch AS branch, CASE
+         WHEN startNode(e1) = a AND startNode(e2) = rel THEN "out"
+         WHEN startNode(e1) = rel AND startNode(e2) = b THEN "in"
+         ELSE "bidir"
+     END AS direction, COUNT(*) AS num_duplicates
+     WHERE num_duplicates > 1
+     RETURN a.uuid AS node_id1, b.uuid AS node_id2, rel_name, branch, direction, num_duplicates
+     """
+     results = await db.execute_query(query=query)
+     for result in results:
+         node_id1 = result.get("node_id1")
+         node_id2 = result.get("node_id2")
+         rel_name = result.get("rel_name")
+         branch = result.get("branch")
+         direction = result.get("direction")
+         num_duplicates = result.get("num_duplicates")
+         raise ValueError(
+             f"{num_duplicates} duplicate relationships ({branch=},{direction=}) between nodes '{node_id1}' and '{node_id2}'"
+             f" with relationship name '{rel_name}'"
+         )
+
+
+ async def verify_no_edges_added_after_node_delete(db: InfrahubDatabase) -> None:
+     """
+     Verify that no edges are added to a Node after it is deleted on a given branch
+     """
+     query = """
+     // ------------
+     // find deleted nodes
+     // ------------
+     MATCH (n:Node)-[e:IS_PART_OF]->(:Root)
+     WHERE e.status = "deleted" OR e.to IS NOT NULL
+     WITH DISTINCT n, e.branch AS delete_branch, e.branch_level AS delete_branch_level, CASE
+         WHEN e.status = "deleted" THEN e.from
+         ELSE e.to
+     END AS delete_time
+     // ------------
+     // find the edges added to the deleted node after the delete time
+     // ------------
+     MATCH (n)-[added_e]-(peer)
+     WHERE added_e.from > delete_time
+     AND type(added_e) <> "IS_PART_OF"
+     // if the node was deleted on a branch (delete_branch_level > 1), and then updated on main/global (added_e.branch_level = 1), we can ignore it
+     AND added_e.branch_level >= delete_branch_level
+     AND (added_e.branch = delete_branch OR delete_branch_level = 1)
+     WITH DISTINCT n, delete_branch, delete_time, added_e, peer AS added_peer
+     // ------------
+     // get the branched_from for the branch on which the edge was added
+     // ------------
+     CALL {
+         WITH added_e
+         MATCH (b:Branch {name: added_e.branch})
+         RETURN b.branched_from AS added_e_branched_from
+     }
+     // ------------
+     // account for the following situations, given that the edge update time is after the node delete time
+     // - deleted on main/global, updated on branch
+     //   - illegal if the delete is before branch.branched_from
+     // - deleted on branch, updated on branch
+     //   - illegal
+     // ------------
+     WITH n, delete_branch, delete_time, added_e, added_peer
+     WHERE delete_branch = added_e.branch
+     OR delete_time < added_e_branched_from
+     RETURN n.uuid AS n_uuid, delete_branch, delete_time, added_e, added_peer
+     """
+     results = await db.execute_query(query=query)
+     error_messages = []
+     for result in results:
+         n_uuid = result.get("n_uuid")
+         delete_branch = result.get("delete_branch")
+         delete_time = result.get("delete_time")
+         added_e = result.get("added_e")
+         added_e_branch = added_e.get("branch")
+         added_e_from = added_e.get("from")
+         added_peer = result.get("added_peer")
+         message = (
+             f"Node {n_uuid} was deleted on {delete_branch} at {delete_time} but has an {added_e.type} edge added on"
+             f" branch {added_e_branch} at {added_e_from} to {added_peer.element_id}"
+         )
+         error_messages.append(message)
+     if error_messages:
+         raise ValueError(error_messages)
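Both verifiers raise ValueError as soon as a violated invariant is found, so tests and maintenance jobs can chain them; the wrapper below is a hypothetical usage sketch:

    from infrahub.database import InfrahubDatabase
    from infrahub.database.validation import (
        verify_no_duplicate_relationships,
        verify_no_edges_added_after_node_delete,
    )

    async def validate_graph_integrity(db: InfrahubDatabase) -> None:
        # raises ValueError with details if either invariant is violated
        await verify_no_duplicate_relationships(db=db)
        await verify_no_edges_added_after_node_delete(db=db)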
infrahub/graphql/app.py CHANGED
@@ -155,7 +155,7 @@ class InfrahubGraphQLApp:
 
          db = websocket.app.state.db
 
-         async with db.start_session() as db:
+         async with db.start_session(read_only=True) as db:
              branch_name = websocket.path_params.get("branch_name", registry.default_branch)
              branch = await registry.get_branch(db=db, branch=branch_name)
infrahub/graphql/loaders/node.py CHANGED
@@ -53,7 +53,7 @@ class NodeDataLoader(DataLoader[str, Node | None]):
          self.db = db
 
      async def batch_load_fn(self, keys: list[Any]) -> list[Node | None]:
-         async with self.db.start_session() as db:
+         async with self.db.start_session(read_only=True) as db:
              nodes_by_id = await NodeManager.get_many(
                  db=db,
                  ids=keys,
infrahub/graphql/loaders/peers.py CHANGED
@@ -51,7 +51,7 @@ class PeerRelationshipsDataLoader(DataLoader[str, list[Relationship]]):
          self.db = db
 
      async def batch_load_fn(self, keys: list[Any]) -> list[list[Relationship]]:  # pylint: disable=method-hidden
-         async with self.db.start_session() as db:
+         async with self.db.start_session(read_only=True) as db:
              peer_rels = await NodeManager.query_peers(
                  db=db,
                  ids=keys,
infrahub/graphql/mutations/main.py CHANGED
@@ -533,7 +533,7 @@ class InfrahubMutationMixin:
          except HFIDViolatedError as exc:
              # Only the HFID constraint has been violated, it means the node exists and we can update without rerunning constraints
              if len(exc.matching_nodes_ids) > 1:
-                 raise RuntimeError(f"Multiple {schema_name} nodes have the same hfid (database corrupted)") from exc
+                 raise RuntimeError(f"Multiple {schema_name} nodes have the same hfid") from exc
              node_id = list(exc.matching_nodes_ids)[0]
              node = await NodeManager.get_one(db=db, id=node_id, kind=schema_name, branch=branch, raise_on_error=True)
              updated_obj, mutation = await cls._call_mutate_update(
infrahub/graphql/mutations/proposed_change.py CHANGED
@@ -222,7 +222,7 @@ class ProposedChangeMerge(Mutation):
 
          async with graphql_context.db.start_session() as db:
              proposed_change.state.value = ProposedChangeState.MERGING.value
-             proposed_change.save(db=db)
+             await proposed_change.save(db=db)
 
          if wait_until_completion:
              await graphql_context.service.workflow.execute_workflow(
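The fix adds a missing await: without it, save() only builds a coroutine object that is never executed, so the MERGING state was never persisted. A self-contained illustration:

    import asyncio

    class Model:
        async def save(self) -> None:
            print("persisted")

    async def main() -> None:
        m = Model()
        m.save()        # bug: returns an unawaited coroutine, nothing runs
        await m.save()  # fix: prints "persisted"

    asyncio.run(main())  # Python also warns: "RuntimeWarning: coroutine ... was never awaited"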
infrahub/graphql/queries/relationship.py CHANGED
@@ -34,7 +34,7 @@ class Relationships(ObjectType):
 
          response: dict[str, Any] = {"edges": [], "count": None}
 
-         async with graphql_context.db.start_session() as db:
+         async with graphql_context.db.start_session(read_only=True) as db:
              query = await RelationshipGetByIdentifierQuery.init(
                  db=db,
                  branch=graphql_context.branch,
infrahub/graphql/queries/task.py CHANGED
@@ -32,6 +32,8 @@ class Tasks(ObjectType):
          workflow: list[str] | None = None,
          related_node__ids: list | None = None,
          q: str | None = None,
+         log_limit: int | None = None,
+         log_offset: int | None = None,
      ) -> dict[str, Any]:
          related_nodes = related_node__ids or []
          ids = ids or []
@@ -45,6 +47,8 @@
              statuses=state,
              workflows=workflow,
              related_nodes=related_nodes,
+             log_limit=log_limit,
+             log_offset=log_offset,
          )
 
      @staticmethod
@@ -71,6 +75,8 @@
          branch: str | None = None,
          limit: int | None = None,
          offset: int | None = None,
+         log_limit: int | None = None,
+         log_offset: int | None = None,
      ) -> dict[str, Any]:
          graphql_context: GraphqlContext = info.context
          fields = await extract_fields_first_node(info)
@@ -87,6 +93,8 @@
              related_nodes=related_nodes,
              limit=limit,
              offset=offset,
+             log_limit=log_limit,
+             log_offset=log_offset,
          )
          prefect_count = prefect_tasks.get("count", None)
          return {
@@ -105,6 +113,8 @@ Task = Field(
      workflow=List(String),
      ids=List(String),
      q=String(required=False),
+     log_limit=Int(required=False),
+     log_offset=Int(required=False),
      resolver=Tasks.resolve,
      required=True,
  )
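The new log_limit and log_offset arguments let clients page through a task's logs independently of the task-level limit and offset. A hypothetical query shape (all field names other than the new arguments are assumed, not taken from this diff):

    TASKS_WITH_PAGED_LOGS = """
    query {
      InfrahubTask(limit: 10, log_limit: 5, log_offset: 0) {
        count
        edges {
          node {
            id
            title
            logs { count edges { node { message severity } } }
          }
        }
      }
    }
    """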
infrahub/graphql/resolvers/many_relationship.py CHANGED
@@ -33,7 +33,7 @@ class ManyRelationshipResolver:
          parent_id: str,
          node_schema: NodeSchema,
      ) -> list[str]:
-         async with db.start_session() as dbs:
+         async with db.start_session(read_only=True) as dbs:
              query = await NodeGetHierarchyQuery.init(
                  db=dbs,
                  direction=RelationshipHierarchyDirection.DESCENDANTS,
@@ -55,7 +55,7 @@ class ManyRelationshipResolver:
          rel_schema: RelationshipSchema,
          filters: dict[str, Any],
      ) -> int:
-         async with db.start_session() as dbs:
+         async with db.start_session(read_only=True) as dbs:
              return await NodeManager.count_peers(
                  db=dbs,
                  ids=ids,
@@ -194,7 +194,7 @@ class ManyRelationshipResolver:
          offset: int | None = None,
          limit: int | None = None,
      ) -> list[dict[str, Any]] | None:
-         async with db.start_session() as dbs:
+         async with db.start_session(read_only=True) as dbs:
              objs = await NodeManager.query_peers(
                  db=dbs,
                  ids=ids,
@@ -257,7 +257,7 @@ class ManyRelationshipResolver:
          all_peer_rels.extend(node_peer_rels)
          if not all_peer_rels:
              return None
-         async with db.start_session() as dbs:
+         async with db.start_session(read_only=True) as dbs:
              return [
                  await obj.to_graphql(db=dbs, fields=node_fields, related_node_ids=related_node_ids)
                  for obj in all_peer_rels
infrahub/graphql/resolvers/resolver.py CHANGED
@@ -29,7 +29,7 @@ async def account_resolver(
      fields = await extract_fields(info.field_nodes[0].selection_set)
      graphql_context: GraphqlContext = info.context
 
-     async with graphql_context.db.start_session() as db:
+     async with graphql_context.db.start_session(read_only=True) as db:
          results = await NodeManager.query(
              schema=InfrahubKind.GENERICACCOUNT,
              filters={"ids": [graphql_context.account_session.account_id]},
@@ -102,7 +102,7 @@ async def default_resolver(*args: Any, **kwargs) -> dict | list[dict] | None:
          if "__" in key and value or key in ["id", "ids"]
      }
 
-     async with graphql_context.db.start_session() as db:
+     async with graphql_context.db.start_session(read_only=True) as db:
          objs = await NodeManager.query_peers(
              db=db,
              ids=[parent["id"]],
@@ -158,7 +158,7 @@ async def default_paginated_list_resolver(
      fields = await extract_selection(info.field_nodes[0], schema=schema)
 
      graphql_context: GraphqlContext = info.context
-     async with graphql_context.db.start_session() as db:
+     async with graphql_context.db.start_session(read_only=True) as db:
          response: dict[str, Any] = {"edges": []}
          filters = {
              key: value for key, value in kwargs.items() if ("__" in key and value is not None) or key in ("ids", "hfid")
@@ -293,7 +293,7 @@ async def hierarchy_resolver(
 
      response: dict[str, Any] = {"edges": [], "count": None}
 
-     async with graphql_context.db.start_session() as db:
+     async with graphql_context.db.start_session(read_only=True) as db:
          if "count" in fields:
              response["count"] = await NodeManager.count_hierarchy(
                  db=db,
infrahub/graphql/resolvers/single_relationship.py CHANGED
@@ -109,7 +109,7 @@ class SingleRelationshipResolver:
              for key, value in kwargs.items()
              if "__" in key and value or key in ["id", "ids"]
          }
-         async with db.start_session() as dbs:
+         async with db.start_session(read_only=True) as dbs:
              objs = await NodeManager.query_peers(
                  db=dbs,
                  ids=[parent_id],
@@ -171,5 +171,5 @@ class SingleRelationshipResolver:
          node = await loader.load(key=peer_id)
          if not node:
              return None
-         async with db.start_session() as dbs:
+         async with db.start_session(read_only=True) as dbs:
              return await node.to_graphql(db=dbs, fields=node_fields, related_node_ids=related_node_ids)
infrahub/graphql/subscription/graphql_query.py CHANGED
@@ -29,7 +29,7 @@ async def resolver_graphql_query(
      graphql_context: GraphqlContext = info.context
      at = Timestamp()
 
-     async with graphql_context.db.start_session() as db:
+     async with graphql_context.db.start_session(read_only=True) as db:
          # Find the GraphQLQuery and the GraphQL Schema
          graphql_query = await NodeManager.get_one_by_default_filter(
              db=db, id=name, kind=CoreGraphQLQuery, branch=graphql_context.branch, at=at
@@ -38,7 +38,7 @@
          raise ValueError(f"Unable to find the {InfrahubKind.GRAPHQLQUERY} {name}")
 
      while True:
-         async with graphql_context.db.start_session() as db:
+         async with graphql_context.db.start_session(read_only=True) as db:
              result = await graphql(
                  schema=graphql_schema,
                  source=graphql_query.query.value,
infrahub/graphql/types/branch.py CHANGED
@@ -37,7 +37,7 @@ class BranchType(InfrahubObjectType):
          graphql_context: GraphqlContext,
          **kwargs: Any,
      ) -> list[dict[str, Any]]:
-         async with graphql_context.db.start_session() as db:
+         async with graphql_context.db.start_session(read_only=True) as db:
              objs = await Branch.get_list(db=db, **kwargs)
 
              if not objs:
infrahub/graphql/types/task_log.py CHANGED
@@ -1,4 +1,4 @@
- from graphene import Field, InputObjectType, List, ObjectType, String
+ from graphene import Field, InputObjectType, Int, List, NonNull, ObjectType, String
  from graphene.types.uuid import UUID
 
  from .enums import Severity
@@ -26,4 +26,5 @@ class TaskLogNodes(ObjectType):
 
 
  class TaskLogEdge(ObjectType):
-     edges = List(TaskLogNodes)
+     edges = List(NonNull(TaskLogNodes), required=True)
+     count = Int(required=True)