infrahub-server 1.7.1__py3-none-any.whl → 1.7.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- infrahub/actions/gather.py +2 -2
- infrahub/api/query.py +3 -2
- infrahub/api/transformation.py +3 -3
- infrahub/computed_attribute/gather.py +2 -0
- infrahub/config.py +2 -2
- infrahub/core/attribute.py +21 -2
- infrahub/core/diff/model/path.py +43 -0
- infrahub/core/graph/__init__.py +1 -1
- infrahub/core/graph/index.py +2 -0
- infrahub/core/ipam/resource_allocator.py +229 -0
- infrahub/core/migrations/graph/__init__.py +8 -0
- infrahub/core/migrations/graph/m052_fix_global_branch_level.py +51 -0
- infrahub/core/migrations/graph/m053_fix_branch_level_zero.py +61 -0
- infrahub/core/migrations/graph/m054_cleanup_orphaned_nodes.py +87 -0
- infrahub/core/migrations/graph/m055_remove_webhook_validate_certificates_default.py +86 -0
- infrahub/core/migrations/schema/node_attribute_add.py +17 -19
- infrahub/core/node/lock_utils.py +23 -2
- infrahub/core/node/resource_manager/ip_address_pool.py +5 -11
- infrahub/core/node/resource_manager/ip_prefix_pool.py +5 -21
- infrahub/core/node/resource_manager/number_pool.py +109 -39
- infrahub/core/query/__init__.py +7 -1
- infrahub/core/query/branch.py +18 -2
- infrahub/core/query/ipam.py +629 -40
- infrahub/core/query/node.py +128 -0
- infrahub/core/query/resource_manager.py +114 -1
- infrahub/core/relationship/model.py +1 -1
- infrahub/core/schema/definitions/core/webhook.py +0 -1
- infrahub/core/schema/definitions/internal.py +7 -4
- infrahub/core/validators/determiner.py +4 -0
- infrahub/graphql/analyzer.py +3 -1
- infrahub/graphql/app.py +7 -10
- infrahub/graphql/execution.py +95 -0
- infrahub/graphql/mutations/proposed_change.py +15 -0
- infrahub/graphql/parser.py +10 -7
- infrahub/graphql/queries/ipam.py +20 -25
- infrahub/graphql/queries/search.py +29 -9
- infrahub/proposed_change/tasks.py +2 -0
- infrahub/services/adapters/http/httpx.py +27 -0
- infrahub/trigger/catalogue.py +2 -0
- infrahub/trigger/models.py +73 -4
- infrahub/trigger/setup.py +1 -1
- infrahub/trigger/system.py +36 -0
- infrahub/webhook/models.py +4 -2
- infrahub/webhook/tasks.py +2 -2
- infrahub/workflows/initialization.py +2 -2
- {infrahub_server-1.7.1.dist-info → infrahub_server-1.7.2.dist-info}/METADATA +3 -3
- {infrahub_server-1.7.1.dist-info → infrahub_server-1.7.2.dist-info}/RECORD +52 -46
- infrahub_testcontainers/docker-compose-cluster.test.yml +16 -10
- infrahub_testcontainers/docker-compose.test.yml +11 -10
- infrahub/pools/address.py +0 -16
- {infrahub_server-1.7.1.dist-info → infrahub_server-1.7.2.dist-info}/WHEEL +0 -0
- {infrahub_server-1.7.1.dist-info → infrahub_server-1.7.2.dist-info}/entry_points.txt +0 -0
- {infrahub_server-1.7.1.dist-info → infrahub_server-1.7.2.dist-info}/licenses/LICENSE.txt +0 -0
infrahub/core/migrations/graph/m054_cleanup_orphaned_nodes.py
ADDED

@@ -0,0 +1,87 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any, Sequence
+
+from infrahub.core.migrations.shared import GraphMigration, MigrationInput, MigrationResult
+
+from ...query import Query, QueryType
+
+if TYPE_CHECKING:
+    from infrahub.database import InfrahubDatabase
+
+
+class CleanupOrphanedNodesQuery(Query):
+    """
+    Clean up orphaned Node vertices (no IS_PART_OF edge to Root) and their linked
+    Attributes and Relationships.
+    """
+
+    name = "cleanup_orphaned_nodes"
+    type = QueryType.WRITE
+    insert_return = False
+    raise_error_if_empty = False
+
+    async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:  # noqa: ARG002
+        query = """
+        // Delete attributes of orphaned nodes
+        MATCH (n:Node)
+        WHERE NOT exists((n)-[:IS_PART_OF]->(:Root))
+        OPTIONAL MATCH (n)-[:HAS_ATTRIBUTE]->(attr:Attribute)
+        WITH DISTINCT attr
+        CALL (attr) {
+            DETACH DELETE attr
+        } IN TRANSACTIONS
+
+        // reduce the results to a single row
+        WITH 1 AS one
+        LIMIT 1
+
+        // Delete relationships that will have < 2 Node peers after orphaned node removal
+        OPTIONAL MATCH (orphan:Node)-[:IS_RELATED]-(rel:Relationship)
+        WHERE NOT exists((orphan)-[:IS_PART_OF]->(:Root))
+        WITH DISTINCT rel
+        CALL (rel) {
+            OPTIONAL MATCH (rel)-[:IS_RELATED]-(peer:Node)
+            WHERE exists((peer)-[:IS_PART_OF]->(:Root))
+            WITH rel, count(peer) AS remaining_peers
+            WHERE remaining_peers < 2
+            DETACH DELETE rel
+        } IN TRANSACTIONS
+
+        // reduce the results to a single row
+        WITH 1 AS one
+        LIMIT 1
+
+        // Delete the orphaned nodes
+        MATCH (n:Node)
+        WHERE NOT exists((n)-[:IS_PART_OF]->(:Root))
+        CALL (n) {
+            DETACH DELETE n
+        } IN TRANSACTIONS
+        """
+        self.add_to_query(query)
+
+
+class Migration054(GraphMigration):
+    """
+    Clean up orphaned Node vertices that have no IS_PART_OF edge to Root.
+
+    This can happen when a branch-aware node is deleted during branch deletion,
+    but its branch-agnostic attributes or relationships are not properly cleaned up.
+
+    The migration:
+    1. DETACH DELETEs Attributes linked to orphaned nodes
+    2. DETACH DELETEs Relationships that would have < 2 Node peers after orphaned node removal
+    3. DETACH DELETEs the orphaned nodes themselves
+    """
+
+    name: str = "054_cleanup_orphaned_nodes"
+    minimum_version: int = 53
+    queries: Sequence[type[Query]] = [CleanupOrphanedNodesQuery]
+
+    async def validate_migration(self, db: InfrahubDatabase) -> MigrationResult:  # noqa: ARG002
+        return MigrationResult()
+
+    async def execute(self, migration_input: MigrationInput) -> MigrationResult:
+        # Override parent class to skip transaction in case there are many nodes to delete
+        return await self.do_execute(migration_input=migration_input)
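Worth noting for readers who want to try the batched-delete pattern outside infrahub: `CALL { ... } IN TRANSACTIONS` only runs inside an implicit (auto-commit) transaction, which is why Migration054 overrides execute() to skip the usual wrapping transaction. A minimal sketch with the official neo4j Python driver, assuming placeholder connection details and a recent Neo4j 5.x for the `CALL (n) { ... }` variable-scope syntax; this is illustration, not infrahub code:

from neo4j import GraphDatabase

# Batched delete of orphaned nodes, reproduced from the migration above.
CLEANUP = """
MATCH (n:Node)
WHERE NOT exists((n)-[:IS_PART_OF]->(:Root))
CALL (n) {
    DETACH DELETE n
} IN TRANSACTIONS
"""

# URI and credentials below are placeholder assumptions.
with GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "password")) as driver:
    with driver.session() as session:
        # session.run() uses an implicit transaction, which is required for
        # CALL ... IN TRANSACTIONS; an explicit transaction would raise an error.
        session.run(CLEANUP).consume()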
infrahub/core/migrations/graph/m055_remove_webhook_validate_certificates_default.py
ADDED

@@ -0,0 +1,86 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any, Sequence
+
+from infrahub.core.constants import NULL_VALUE
+from infrahub.core.migrations.shared import MigrationResult
+from infrahub.core.query import Query, QueryType
+
+from ..shared import GraphMigration
+
+if TYPE_CHECKING:
+    from infrahub.database import InfrahubDatabase
+
+
+class Migration055Query01(Query):
+    """Remove the default_value from CoreWebhook's validate_certificates attribute.
+
+    This migration finds the CoreWebhook SchemaGeneric, locates its validate_certificates
+    SchemaAttribute, and sets the default_value to NULL. This removes the previous default
+    of True.
+    """
+
+    name = "migration_055_01"
+    type: QueryType = QueryType.WRITE
+
+    async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:  # noqa: ARG002
+        self.params["at"] = self.at.to_string()
+        self.params["null_value"] = NULL_VALUE
+
+        query = """
+        // get the generic schema
+        MATCH p1 = (sg:SchemaGeneric)-[:HAS_ATTRIBUTE]->(:Attribute {name: "name"})-[:HAS_VALUE]->(:AttributeValueIndexed {value: "Webhook"})
+        WHERE all(r IN relationships(p1) WHERE r.status = "active" AND r.to IS NULL)
+
+        // for safety, also check that the sg is in the namespace "Core"
+        MATCH p2 = (sg)-[:HAS_ATTRIBUTE]->(:Attribute {name: "namespace"})-[:HAS_VALUE]->(:AttributeValueIndexed {value: "Core"})
+        WHERE all(r IN relationships(p2) WHERE r.status = "active" AND r.to IS NULL)
+
+        // there should only be 1 CoreWebhook schema generic
+        WITH sg
+        LIMIT 1
+
+        // find the validate_certificates attribute
+        MATCH p3 = (sg)-[:IS_RELATED]-(:Relationship {name: "schema__node__attributes"})
+            -[:IS_RELATED]-(sa:SchemaAttribute)
+            -[:HAS_ATTRIBUTE]->(:Attribute {name: "name"})
+            -[:HAS_VALUE]->(:AttributeValueIndexed {value: "validate_certificates"})
+        WHERE all(r IN relationships(p3) WHERE r.status = "active" AND r.to IS NULL)
+        // there should only be 1 validate_certificates attribute
+        WITH sa
+        LIMIT 1
+
+        // get the default_value Attribute
+        MATCH (sa)-[ha:HAS_ATTRIBUTE]->(default_value_attr:Attribute {name: "default_value"})-[hv:HAS_VALUE]->(default_value)
+        WHERE all(r IN [ha, hv] WHERE r.status = "active" AND r.to IS NULL)
+        LIMIT 1
+
+        // skip if it is already NULL
+        WITH sa, default_value_attr, hv, default_value
+        WHERE default_value.value <> $null_value
+
+        // close the HAS_VALUE edge for the current default value
+        SET hv.to = $at
+
+        // get the new value
+        MERGE (new_value:AttributeValue:AttributeValueIndexed {value: $null_value, is_default: true})
+        LIMIT 1
+
+        // link the new value
+        CREATE (default_value_attr)-[new_hv:HAS_VALUE]->(new_value)
+        SET new_hv = properties(hv)
+        SET new_hv.from = $at, new_hv.to = NULL
+        """
+        self.add_to_query(query)
+        self.return_labels = ["new_value"]
+
+
+class Migration055(GraphMigration):
+    name: str = "055_remove_webhook_validate_certificates_default"
+    queries: Sequence[type[Query]] = [Migration055Query01]
+    minimum_version: int = 54
+
+    async def validate_migration(self, db: InfrahubDatabase) -> MigrationResult:  # noqa: ARG002
+        result = MigrationResult()
+
+        return result
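The query above never rewrites a value in place; it closes the open HAS_VALUE edge by stamping `to` and opens a replacement edge with `from` at the same timestamp, so history stays intact. A condensed, hypothetical restatement of that close-then-reopen pattern, with simplified labels and properties rather than the exact infrahub schema:

# Hypothetical, simplified restatement of the append-only update pattern
# used by Migration055Query01; labels are reduced for illustration.
UPDATE_DEFAULT_VALUE = """
MATCH (attr:Attribute {name: "default_value"})-[hv:HAS_VALUE]->(old_value)
WHERE hv.status = "active" AND hv.to IS NULL   // only the currently-open edge
SET hv.to = $at                                // close it, keeping history
MERGE (new_value:AttributeValue {value: $null_value, is_default: true})
CREATE (attr)-[new_hv:HAS_VALUE]->(new_value)
SET new_hv = properties(hv)                    // copy branch/status metadata
SET new_hv.from = $at, new_hv.to = NULL        // open the new edge at $at
"""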
infrahub/core/migrations/schema/node_attribute_add.py
CHANGED

@@ -6,7 +6,6 @@ from infrahub.core import registry
 from infrahub.core.node import Node
 from infrahub.core.schema.generic_schema import GenericSchema
 from infrahub.core.schema.node_schema import NodeSchema
-from infrahub.exceptions import PoolExhaustedError
 from infrahub.tasks.registry import update_branch_registry

 from ..query import AttributeMigrationQuery, MigrationBaseQuery
@@ -17,6 +16,7 @@ if TYPE_CHECKING:
     from infrahub.core.node.resource_manager.number_pool import CoreNumberPool
     from infrahub.core.schema import MainSchemaTypes
     from infrahub.core.schema.attribute_schema import AttributeSchema
+    from infrahub.database import InfrahubDatabase

     from ...branch import Branch

@@ -93,23 +93,21 @@ class NodeAttributeAddMigration(AttributeSchemaMigration):
             db=db, branch=branch, schema=self.new_schema, fields={"id": True, self.new_attribute_schema.name: True}
         )

-        await
-        await node.save(db=db, fields=[self.new_attribute_schema.name], at=at)
+        async def allocate_numbers(db: InfrahubDatabase) -> None:
+            for node in nodes:
+                number = await number_pool.get_resource(
+                    db=db, branch=branch, node=node, attribute=self.new_attribute_schema, at=at
+                )
+                attr = node.get_attribute(name=self.new_attribute_schema.name)
+                attr.value = number
+                attr.set_source(number_pool)
+
+                await node.save(db=db, fields=[self.new_attribute_schema.name], at=at)
+
+        if db.is_transaction:
+            await allocate_numbers(db=db)
+        else:
+            async with db.start_transaction() as dbt:
+                await allocate_numbers(db=dbt)

         return result
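The new allocate_numbers closure captures a reusable shape: reuse the caller's transaction when one is already open, otherwise start one so the per-node allocation and save commit atomically. A hedged sketch of that shape in isolation, where do_work stands in for the real allocation loop and the InfrahubDatabase surface is taken from the diff above:

from infrahub.database import InfrahubDatabase

async def run_atomically(db: InfrahubDatabase) -> None:
    async def do_work(db: InfrahubDatabase) -> None:
        ...  # writes that must land together

    if db.is_transaction:
        # Already inside a transaction: reuse it so these writes commit
        # or roll back together with the caller's work.
        await do_work(db=db)
    else:
        # Standalone call: open a transaction so the writes are atomic.
        async with db.start_transaction() as dbt:
            await do_work(db=dbt)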
infrahub/core/node/lock_utils.py
CHANGED

@@ -1,6 +1,7 @@
 import hashlib
 from typing import TYPE_CHECKING

+from infrahub.core.constants import RelationshipCardinality
 from infrahub.core.node import Node
 from infrahub.core.schema import GenericSchema
 from infrahub.core.schema.schema_branch import SchemaBranch
@@ -10,6 +11,7 @@ if TYPE_CHECKING:


 RESOURCE_POOL_LOCK_NAMESPACE = "resource_pool"
+RELATIONSHIP_COUNT_LOCK_NAMESPACE = "relationship_count"


 def _get_kinds_to_lock_on_object_mutation(kind: str, schema_branch: SchemaBranch) -> list[str]:
@@ -55,7 +57,8 @@ def _hash(value: str) -> str:
 def get_lock_names_on_object_mutation(node: Node, schema_branch: SchemaBranch) -> list[str]:
     """
     Return lock names for object on which we want to avoid concurrent mutation (create/update).
-    Lock names include kind, some generic kinds, resource pool ids,
+    Lock names include kind, some generic kinds, resource pool ids, peer ids for cardinality one relationships,
+    and values of attributes of corresponding uniqueness constraints.
     """

     lock_names: set[str] = set()
@@ -66,13 +69,31 @@ def get_lock_names_on_object_mutation(node: Node, schema_branch: SchemaBranch) -> list[str]:
     if attribute is not None and getattr(attribute, "from_pool", None) and "id" in attribute.from_pool:
         lock_names.add(f"{RESOURCE_POOL_LOCK_NAMESPACE}.{attribute.from_pool['id']}")

-    # Check if relationships allocate resources
+    # Check if relationships allocate resources or have cardinality one constraint
     for rel_name in node._relationships:
         rel_manager: RelationshipManager = getattr(node, rel_name)
         for rel in rel_manager._relationships:
            if rel.from_pool and "id" in rel.from_pool:
                lock_names.add(f"{RESOURCE_POOL_LOCK_NAMESPACE}.{rel.from_pool['id']}")

+            peer_id = rel.peer_id
+            if not peer_id or not rel.schema.identifier:
+                continue
+
+            # Check if this node's relationship has cardinality one or max/min_count constraint
+            # This prevents concurrent updates to the same node's constrained relationship
+            if rel.schema.cardinality == RelationshipCardinality.ONE or rel.schema.max_count or rel.schema.min_count:
+                lock_names.add(f"{RELATIONSHIP_COUNT_LOCK_NAMESPACE}.{rel.schema.identifier}.{node.id}")
+
+            # Check if the peer has count constraints on the reverse relationship
+            # This includes cardinality one, max_count, and min_count constraints
+            peer_schema = schema_branch.get(name=rel.schema.peer, duplicate=False)
+            peer_rel = peer_schema.get_relationship_by_identifier(id=rel.schema.identifier, raise_on_error=False)
+            if peer_rel and (
+                peer_rel.cardinality == RelationshipCardinality.ONE or peer_rel.max_count or peer_rel.min_count
+            ):
+                lock_names.add(f"{RELATIONSHIP_COUNT_LOCK_NAMESPACE}.{rel.schema.identifier}.{peer_id}")
+
     lock_kinds = _get_kinds_to_lock_on_object_mutation(node.get_kind(), schema_branch)
     for kind in lock_kinds:
         schema = schema_branch.get(name=kind, duplicate=False)
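The new locks are plain dotted strings in the relationship_count namespace, keyed by the relationship identifier plus the id of whichever side carries the count constraint. A small sketch of the name shape; the identifier and id values below are made up:

RELATIONSHIP_COUNT_LOCK_NAMESPACE = "relationship_count"

def relationship_count_lock_name(identifier: str, constrained_id: str) -> str:
    # Two concurrent mutations that could both attach a peer to the same
    # constrained relationship on the same node produce the same lock name,
    # so they serialize instead of racing past the cardinality check.
    return f"{RELATIONSHIP_COUNT_LOCK_NAMESPACE}.{identifier}.{constrained_id}"

# Hypothetical identifier and node id:
print(relationship_count_lock_name("builtin__device__primary_ip", "17d3c9b2"))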
infrahub/core/node/resource_manager/ip_address_pool.py
CHANGED

@@ -6,13 +6,12 @@ from typing import TYPE_CHECKING, Any
 from infrahub import lock
 from infrahub.core import registry
 from infrahub.core.ipam.reconciler import IpamReconciler
-from infrahub.core.
+from infrahub.core.ipam.resource_allocator import IPAMResourceAllocator
 from infrahub.core.query.resource_manager import (
     IPAddressPoolGetReserved,
     IPAddressPoolSetReserved,
 )
 from infrahub.exceptions import PoolExhaustedError, ValidationError
-from infrahub.pools.address import get_available

 from .. import Node
 from ..lock_utils import RESOURCE_POOL_LOCK_NAMESPACE
@@ -88,6 +87,7 @@ class CoreIPAddressPool(Node):
     async def get_next(self, db: InfrahubDatabase, prefixlen: int | None = None) -> IPAddressType:
         resources = await self.resources.get_peers(db=db)  # type: ignore[attr-defined]
         ip_namespace = await self.ip_namespace.get_peer(db=db)  # type: ignore[attr-defined]
+        allocator = IPAMResourceAllocator(db=db, namespace=ip_namespace, branch=self._branch, branch_agnostic=True)

         try:
             weighted_resources = sorted(resources.values(), key=lambda r: r.allocation_weight.value or 0, reverse=True)
@@ -101,18 +101,12 @@ class CoreIPAddressPool(Node):
             if not ip_prefix.prefixlen <= prefix_length <= ip_prefix.max_prefixlen:
                 raise ValidationError(input_value="Invalid prefix length for current selected prefix")

-            )
-            available = get_available(
-                network=ip_prefix,
-                addresses=[ip.address for ip in addresses],
+            next_address = await allocator.get_next_address(
+                ip_prefix=ip_prefix,
                 is_pool=resource.is_pool.value,  # type: ignore[attr-defined]
             )

-            if
-            next_address = available.iter_cidrs()[0]
+            if next_address:
                 return ipaddress.ip_interface(f"{next_address.ip}/{prefix_length}")

         raise PoolExhaustedError("There are no more addresses available in this pool.")
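The return statement above splices the allocated host address together with the requested prefix length via the standard library; a quick illustration with made-up values:

import ipaddress

# Made-up allocation result and requested prefix length.
next_ip = ipaddress.ip_address("10.0.0.5")
prefix_length = 24

iface = ipaddress.ip_interface(f"{next_ip}/{prefix_length}")
print(iface)          # 10.0.0.5/24
print(iface.network)  # 10.0.0.0/24, the containing network, not just the host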
infrahub/core/node/resource_manager/ip_prefix_pool.py
CHANGED

@@ -3,18 +3,15 @@ from __future__ import annotations
 import ipaddress
 from typing import TYPE_CHECKING, Any

-from netaddr import IPSet
-
 from infrahub import lock
 from infrahub.core import registry
 from infrahub.core.ipam.reconciler import IpamReconciler
-from infrahub.core.
+from infrahub.core.ipam.resource_allocator import IPAMResourceAllocator
 from infrahub.core.query.resource_manager import (
     PrefixPoolGetReserved,
     PrefixPoolSetReserved,
 )
 from infrahub.exceptions import ValidationError
-from infrahub.pools.prefix import get_next_available_prefix

 from .. import Node
 from ..lock_utils import RESOURCE_POOL_LOCK_NAMESPACE
@@ -96,6 +93,7 @@ class CoreIPPrefixPool(Node):
     async def get_next(self, db: InfrahubDatabase, prefixlen: int) -> IPNetworkType:
         resources = await self.resources.get_peers(db=db)  # type: ignore[attr-defined]
         ip_namespace = await self.ip_namespace.get_peer(db=db)  # type: ignore[attr-defined]
+        allocator = IPAMResourceAllocator(db=db, namespace=ip_namespace, branch=self._branch, branch_agnostic=True)

         try:
             weighted_resources = sorted(resources.values(), key=lambda r: r.allocation_weight.value or 0, reverse=True)
@@ -103,23 +101,9 @@ class CoreIPPrefixPool(Node):
             weighted_resources = list(resources.values())

         for resource in weighted_resources:
-                namespace=ip_namespace,
-                branch=self._branch,
-                branch_agnostic=True,
-            )
-
-            pool = IPSet([resource.prefix.value])
-            for subnet in subnets:
-                pool.remove(addr=str(subnet.prefix))
-
-            try:
-                prefix_ver = ipaddress.ip_network(resource.prefix.value).version
-                next_available = get_next_available_prefix(pool=pool, prefix_length=prefixlen, prefix_ver=prefix_ver)
+            resource_prefix = ipaddress.ip_network(resource.prefix.value)  # type: ignore[attr-defined]
+            next_available = await allocator.get_next_prefix(ip_prefix=resource_prefix, target_prefix_length=prefixlen)
+            if next_available:
                 return next_available
-            except ValueError:
-                continue

         raise IndexError("No more resources available")
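Both pool classes try resources highest allocation_weight first, treating missing weights as zero. A minimal sketch of that ordering with a stand-in dataclass; the real attribute is a wrapper exposing .value (hence `r.allocation_weight.value or 0` in the diff), simplified here to a plain int:

from dataclasses import dataclass

@dataclass
class Resource:
    prefix: str
    allocation_weight: int | None

resources = [Resource("10.0.0.0/16", None), Resource("10.1.0.0/16", 10)]
# Highest weight first; None weights sort as 0.
weighted = sorted(resources, key=lambda r: r.allocation_weight or 0, reverse=True)
assert [r.prefix for r in weighted] == ["10.1.0.0/16", "10.0.0.0/16"]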
infrahub/core/node/resource_manager/number_pool.py
CHANGED

@@ -4,7 +4,12 @@ from typing import TYPE_CHECKING

 from infrahub import lock
 from infrahub.core import registry
-from infrahub.core.query.resource_manager import
+from infrahub.core.query.resource_manager import (
+    NumberPoolGetFree,
+    NumberPoolGetReserved,
+    NumberPoolGetUsed,
+    NumberPoolSetReserved,
+)
 from infrahub.core.schema.attribute_parameters import NumberAttributeParameters
 from infrahub.exceptions import PoolExhaustedError

@@ -47,6 +52,28 @@ class CoreNumberPool(Node):
         used = [result.value for result in query.iter_results()]
         return [item for item in used if item is not None]

+    async def get_free(
+        self, db: InfrahubDatabase, branch: Branch, min_value: int | None = None, max_value: int | None = None
+    ) -> int | None:
+        """Returns the next free number in the pool.
+
+        Args:
+            db: Database connection.
+            branch: Branch to query.
+            min_value: Minimum value to start searching from.
+            max_value: Maximum value to search up to.
+
+        Returns:
+            The next free number, or None if no free numbers are available.
+        """
+
+        query = await NumberPoolGetFree.init(
+            db=db, branch=branch, pool=self, branch_agnostic=True, min_value=min_value, max_value=max_value
+        )
+        await query.execute(db=db)
+
+        return query.get_result_value()
+
     async def reserve(self, db: InfrahubDatabase, number: int, identifier: str, at: Timestamp | None = None) -> None:
         """Reserve a number in the pool for a specific identifier."""

@@ -85,51 +112,94 @@
         return number

     async def get_next(self, db: InfrahubDatabase, branch: Branch, attribute: AttributeSchema) -> int:
-            parameters=attribute.parameters if isinstance(attribute.parameters, NumberAttributeParameters) else None,
-        )
-        if next_number is None:
-            raise PoolExhaustedError("There are no more values available in this pool.")
-
-        return next_number
-
-    ) -> list[int]:
-        taken = await self.get_used(db=db, branch=branch)
-            end=self.end_range.value,  # type: ignore[attr-defined]
-            taken=list(set(taken) | set(allocated)),
-            parameters=attribute.parameters
-            if isinstance(attribute.parameters, NumberAttributeParameters)
-            else None,
-        )
-        if next_number is None:
-            raise PoolExhaustedError(
-                f"There are no more values available in this pool, couldn't allocate {quantity} values, only {len(allocated)} available."
-            )
-        if
-        return num
+        """Get the next available number from the pool.
+
+        Args:
+            db: Database connection.
+            branch: Branch to query.
+            attribute: Attribute schema that may contain NumberAttributeParameters constraints.
+
+        Returns:
+            The next available number that satisfies all constraints.
+
+        Raises:
+            PoolExhaustedError: If no valid numbers are available in the pool.
+        """
+        parameters = attribute.parameters if isinstance(attribute.parameters, NumberAttributeParameters) else None
+
+        # Extract exclusion constraints from the attribute parameters
+        excluded_values: set[int] = set()
+        excluded_ranges: list[tuple[int, int]] = []
+
+        if parameters:
+            excluded_values = set(parameters.get_excluded_single_values())
+            excluded_ranges = parameters.get_excluded_ranges()
+
+        # Compute effective range by combining pool range with min/max constraints
+        pool_start = self.start_range.value  # type: ignore[attr-defined]
+        pool_end = self.end_range.value  # type: ignore[attr-defined]
+
+        effective_start = pool_start
+        effective_end = pool_end
+
+        if parameters:
+            if parameters.min_value is not None:
+                effective_start = max(effective_start, parameters.min_value)
+            if parameters.max_value is not None:
+                effective_end = min(effective_end, parameters.max_value)
+
+        # Check if the effective range is valid
+        if effective_start > effective_end:
+            raise PoolExhaustedError("There are no more values available in this pool.")
+
+        def skip_excluded(value: int) -> int | None:
+            """Skip past any excluded values/ranges starting from value.
+
+            Returns the next non-excluded value, or None if we exceed effective_end.
+            """
+            current = value
+            while current <= effective_end:
+                # Check if in an excluded range and skip past it
+                in_range = False
+                for range_start, range_end in excluded_ranges:
+                    if range_start <= current <= range_end:
+                        current = range_end + 1
+                        in_range = True
+                        break
+                if in_range:
+                    continue
+
+                # Check if it's an excluded single value
+                if current in excluded_values:
+                    current += 1
+                    continue
+
+                # Found a non-excluded value
+                return current
+
+            return None
+
+        # Skip any excluded values at the start
+        first_valid = skip_excluded(effective_start)
+        if first_valid is None:
+            raise PoolExhaustedError("There are no more values available in this pool.")
+        min_value = first_valid
+
+        # Re-run the query until we find a non-excluded value or exhaust the pool
+        while True:
+            candidate = await self.get_free(db=db, branch=branch, min_value=min_value, max_value=effective_end)
+            if candidate is None:
+                raise PoolExhaustedError("There are no more values available in this pool.")
+
+            # Check if candidate is excluded (single value or range)
+            next_valid = skip_excluded(candidate)
+            if next_valid is None:
+                raise PoolExhaustedError("There are no more values available in this pool.")
+
+            if next_valid != candidate:
+                # Candidate was excluded, re-query starting from next valid point
+                min_value = next_valid
+                continue
+
+            # Candidate passed all checks
+            return candidate
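The exclusion handling above is self-contained enough to verify in isolation. A condensed restatement of skip_excluded with made-up constraints, collapsing the in_range flag into a for/else:

effective_end = 100
excluded_values = {7}
excluded_ranges = [(10, 20)]

def skip_excluded(value: int) -> int | None:
    current = value
    while current <= effective_end:
        for range_start, range_end in excluded_ranges:
            if range_start <= current <= range_end:
                current = range_end + 1  # jump past the whole excluded range
                break
        else:
            if current in excluded_values:
                current += 1  # step over a single excluded value
                continue
            return current
    return None

assert skip_excluded(7) == 8      # single excluded value is stepped over
assert skip_excluded(12) == 21    # excluded range is jumped entirely
assert skip_excluded(101) is None # past effective_end: pool exhausted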
infrahub/core/query/__init__.py
CHANGED

@@ -3,6 +3,7 @@ from __future__ import annotations
 from collections import defaultdict
 from dataclasses import dataclass, field
 from enum import Enum
+from functools import lru_cache
 from typing import TYPE_CHECKING, Any, Callable, Generator, Iterator, TypeVar

 import ujson
@@ -417,6 +418,11 @@ class Query:
         Right now it's mainly used to add more labels to the metrics."""
         return {}

+    @staticmethod
+    @lru_cache(maxsize=1024)
+    def _split_query_lines(query: str) -> list[str]:
+        return [line.strip() for line in query.split("\n") if line.strip()]
+
     def add_to_query(self, query: str | list[str]) -> None:
         """Add a new section at the end of the query.

@@ -427,7 +433,7 @@ class Query:
             for item in query:
                 self.add_to_query(query=item)
         else:
-            self.query_lines.extend([line.strip() for line in query.split("\n") if line.strip()])
+            self.query_lines.extend(self._split_query_lines(query=query))

     def add_subquery(self, subquery: str, node_alias: str, with_clause: str | None = None) -> None:
         self.add_to_query(f"CALL ({node_alias}) {{")
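The cache pays off because query templates are typically module-level string constants, so the very same string is split on every request. A short demonstration of the cached splitter's behavior:

from functools import lru_cache

@lru_cache(maxsize=1024)
def split_query_lines(query: str) -> list[str]:
    return [line.strip() for line in query.split("\n") if line.strip()]

template = """
MATCH (n:Node)
RETURN n
"""
assert split_query_lines(template) == ["MATCH (n:Node)", "RETURN n"]
assert split_query_lines.cache_info().hits == 0  # first call was a miss
split_query_lines(template)
assert split_query_lines.cache_info().hits == 1  # same string: cache hit
# Note: the cached list object is shared between callers; add_to_query only
# reads it via extend(), so sharing is safe in this refactor.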
infrahub/core/query/branch.py
CHANGED

@@ -24,15 +24,31 @@ class DeleteBranchRelationshipsQuery(Query):
     async def query_init(self, db: InfrahubDatabase, **kwargs: Any) -> None:  # noqa: ARG002
         query = """
        // --------------
-       // for every Node
-       // connected to the Node and delete them
+       // for every Node that only exists on this branch (it's about to be deleted),
+       // find any agnostic relationships or attributes connected to the Node and delete them
        // --------------
        OPTIONAL MATCH (:Root)<-[e:IS_PART_OF {status: "active"}]-(n:Node)
        WHERE e.branch = $branch_name
+       // does the node only exist on this branch?
        CALL (n) {
+           OPTIONAL MATCH (n)-[ipo:IS_PART_OF {status: "active"}]->(:Root)
+           WHERE ipo.branch <> $branch_name
+           LIMIT 1
+           RETURN ipo IS NOT NULL AS node_exists_on_other_branch
+       }
+       // if so, delete any linked agnostic relationships or attributes
+       CALL (n, node_exists_on_other_branch) {
+           WITH n, node_exists_on_other_branch
+           WHERE node_exists_on_other_branch = FALSE
            OPTIONAL MATCH (n)-[:IS_RELATED {branch: $global_branch_name}]-(rel:Relationship)
            DETACH DELETE rel
        } IN TRANSACTIONS OF 500 ROWS
+       CALL (n, node_exists_on_other_branch) {
+           WITH n, node_exists_on_other_branch
+           WHERE node_exists_on_other_branch = FALSE
+           OPTIONAL MATCH (n)-[:HAS_ATTRIBUTE {branch: $global_branch_name}]-(attr:Attribute)
+           DETACH DELETE attr
+       } IN TRANSACTIONS OF 500 ROWS

        // reduce the results to a single row
        WITH 1 AS one