infrahub-server 1.7.1__py3-none-any.whl → 1.7.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53):
  1. infrahub/actions/gather.py +2 -2
  2. infrahub/api/query.py +3 -2
  3. infrahub/api/transformation.py +3 -3
  4. infrahub/computed_attribute/gather.py +2 -0
  5. infrahub/config.py +2 -2
  6. infrahub/core/attribute.py +21 -2
  7. infrahub/core/diff/model/path.py +43 -0
  8. infrahub/core/graph/__init__.py +1 -1
  9. infrahub/core/graph/index.py +2 -0
  10. infrahub/core/ipam/resource_allocator.py +229 -0
  11. infrahub/core/migrations/graph/__init__.py +8 -0
  12. infrahub/core/migrations/graph/m052_fix_global_branch_level.py +51 -0
  13. infrahub/core/migrations/graph/m053_fix_branch_level_zero.py +61 -0
  14. infrahub/core/migrations/graph/m054_cleanup_orphaned_nodes.py +87 -0
  15. infrahub/core/migrations/graph/m055_remove_webhook_validate_certificates_default.py +86 -0
  16. infrahub/core/migrations/schema/node_attribute_add.py +17 -19
  17. infrahub/core/node/lock_utils.py +23 -2
  18. infrahub/core/node/resource_manager/ip_address_pool.py +5 -11
  19. infrahub/core/node/resource_manager/ip_prefix_pool.py +5 -21
  20. infrahub/core/node/resource_manager/number_pool.py +109 -39
  21. infrahub/core/query/__init__.py +7 -1
  22. infrahub/core/query/branch.py +18 -2
  23. infrahub/core/query/ipam.py +629 -40
  24. infrahub/core/query/node.py +128 -0
  25. infrahub/core/query/resource_manager.py +114 -1
  26. infrahub/core/relationship/model.py +1 -1
  27. infrahub/core/schema/definitions/core/webhook.py +0 -1
  28. infrahub/core/schema/definitions/internal.py +7 -4
  29. infrahub/core/validators/determiner.py +4 -0
  30. infrahub/graphql/analyzer.py +3 -1
  31. infrahub/graphql/app.py +7 -10
  32. infrahub/graphql/execution.py +95 -0
  33. infrahub/graphql/mutations/proposed_change.py +15 -0
  34. infrahub/graphql/parser.py +10 -7
  35. infrahub/graphql/queries/ipam.py +20 -25
  36. infrahub/graphql/queries/search.py +29 -9
  37. infrahub/proposed_change/tasks.py +2 -0
  38. infrahub/services/adapters/http/httpx.py +27 -0
  39. infrahub/trigger/catalogue.py +2 -0
  40. infrahub/trigger/models.py +73 -4
  41. infrahub/trigger/setup.py +1 -1
  42. infrahub/trigger/system.py +36 -0
  43. infrahub/webhook/models.py +4 -2
  44. infrahub/webhook/tasks.py +2 -2
  45. infrahub/workflows/initialization.py +2 -2
  46. {infrahub_server-1.7.1.dist-info → infrahub_server-1.7.2.dist-info}/METADATA +3 -3
  47. {infrahub_server-1.7.1.dist-info → infrahub_server-1.7.2.dist-info}/RECORD +52 -46
  48. infrahub_testcontainers/docker-compose-cluster.test.yml +16 -10
  49. infrahub_testcontainers/docker-compose.test.yml +11 -10
  50. infrahub/pools/address.py +0 -16
  51. {infrahub_server-1.7.1.dist-info → infrahub_server-1.7.2.dist-info}/WHEEL +0 -0
  52. {infrahub_server-1.7.1.dist-info → infrahub_server-1.7.2.dist-info}/entry_points.txt +0 -0
  53. {infrahub_server-1.7.1.dist-info → infrahub_server-1.7.2.dist-info}/licenses/LICENSE.txt +0 -0
@@ -1,6 +1,5 @@
1
1
  from __future__ import annotations
2
2
 
3
- from graphql import graphql
4
3
  from infrahub_sdk.graphql import Query
5
4
  from prefect import task
6
5
  from prefect.cache_policies import NONE
@@ -8,6 +7,7 @@ from prefect.cache_policies import NONE
8
7
  from infrahub.core.constants import InfrahubKind
9
8
  from infrahub.core.registry import registry
10
9
  from infrahub.database import InfrahubDatabase # noqa: TC001 needed for prefect flow
10
+ from infrahub.graphql.execution import execute_graphql_query
11
11
  from infrahub.graphql.initialization import prepare_graphql_params
12
12
 
13
13
  from .models import ActionTriggerRuleTriggerDefinition
@@ -95,7 +95,7 @@ async def gather_trigger_action_rules(db: InfrahubDatabase) -> list[ActionTrigge
95
95
  db=db,
96
96
  branch=registry.default_branch,
97
97
  )
98
- response = await graphql(
98
+ response = await execute_graphql_query(
99
99
  schema=gql_params.schema,
100
100
  source=trigger_query.render(),
101
101
  context_value=gql_params.context,
infrahub/api/query.py CHANGED
@@ -3,7 +3,6 @@ from __future__ import annotations
3
3
  from typing import TYPE_CHECKING, Any
4
4
 
5
5
  from fastapi import APIRouter, Body, Depends, Path, Query, Request
6
- from graphql import graphql
7
6
  from pydantic import BaseModel, Field
8
7
 
9
8
  from infrahub.api.dependencies import BranchParams, get_branch_params, get_current_user, get_db
@@ -14,6 +13,7 @@ from infrahub.core.protocols import CoreGraphQLQuery
14
13
  from infrahub.database import InfrahubDatabase # noqa: TC001
15
14
  from infrahub.graphql.analyzer import InfrahubGraphQLQueryAnalyzer
16
15
  from infrahub.graphql.api.dependencies import build_graphql_query_permission_checker
16
+ from infrahub.graphql.execution import cached_parse, execute_graphql_query
17
17
  from infrahub.graphql.initialization import prepare_graphql_params
18
18
  from infrahub.graphql.metrics import (
19
19
  GRAPHQL_DURATION_METRICS,
@@ -75,6 +75,7 @@ async def execute_query(
75
75
  schema=gql_params.schema,
76
76
  schema_branch=schema_branch,
77
77
  branch=branch_params.branch,
78
+ document=cached_parse(gql_query.query.value),
78
79
  )
79
80
  await permission_checker.check(
80
81
  db=db,
@@ -93,7 +94,7 @@ async def execute_query(
93
94
  }
94
95
 
95
96
  with GRAPHQL_DURATION_METRICS.labels(**labels).time():
96
- result = await graphql(
97
+ result = await execute_graphql_query(
97
98
  schema=gql_params.schema,
98
99
  source=gql_query.query.value,
99
100
  context_value=gql_params.context,
@@ -3,7 +3,6 @@ from __future__ import annotations
3
3
  from typing import TYPE_CHECKING
4
4
 
5
5
  from fastapi import APIRouter, Depends, Path, Request
6
- from graphql import graphql
7
6
  from starlette.responses import JSONResponse, PlainTextResponse
8
7
 
9
8
  from infrahub.api.dependencies import (
@@ -22,6 +21,7 @@ from infrahub.core.protocols import (
22
21
  )
23
22
  from infrahub.database import InfrahubDatabase # noqa: TC001
24
23
  from infrahub.exceptions import TransformError
24
+ from infrahub.graphql.execution import execute_graphql_query
25
25
  from infrahub.graphql.initialization import prepare_graphql_params
26
26
  from infrahub.graphql.utils import extract_data
27
27
  from infrahub.transformations.models import TransformJinjaTemplateData, TransformPythonData
@@ -69,7 +69,7 @@ async def transform_python(
69
69
  db=dbs, branch=branch_params.branch, at=branch_params.at, service=request.app.state.service
70
70
  )
71
71
 
72
- result = await graphql(
72
+ result = await execute_graphql_query(
73
73
  schema=gql_params.schema,
74
74
  source=query.query.value,
75
75
  context_value=gql_params.context,
@@ -134,7 +134,7 @@ async def transform_jinja2(
134
134
  db=dbs, branch=branch_params.branch, at=branch_params.at, service=request.app.state.service
135
135
  )
136
136
 
137
- result = await graphql(
137
+ result = await execute_graphql_query(
138
138
  schema=gql_params.schema,
139
139
  source=query.query.value,
140
140
  context_value=gql_params.context,
@@ -14,6 +14,7 @@ from infrahub.core.registry import registry
14
14
  from infrahub.database import InfrahubDatabase # noqa: TC001 needed for prefect flow
15
15
  from infrahub.git.utils import get_repositories_commit_per_branch
16
16
  from infrahub.graphql.analyzer import InfrahubGraphQLQueryAnalyzer
17
+ from infrahub.graphql.execution import cached_parse
17
18
  from infrahub.graphql.initialization import prepare_graphql_params
18
19
 
19
20
  from .models import (
@@ -74,6 +75,7 @@ async def gather_python_transform_attributes(
74
75
  branch=branch,
75
76
  schema_branch=schema_branch,
76
77
  schema=graphql_params.schema,
78
+ document=cached_parse(query.query.value),
77
79
  )
78
80
  for attribute in transform_attributes[transform.name.value]:
79
81
  python_transform_computed_attribute = PythonTransformComputedAttribute(
infrahub/config.py CHANGED
@@ -532,8 +532,8 @@ class HTTPSettings(BaseSettings):
532
532
 
533
533
  return self
534
534
 
535
- def get_tls_context(self) -> ssl.SSLContext:
536
- if self.tls_insecure:
535
+ def get_tls_context(self, force_verify: bool = False) -> ssl.SSLContext:
536
+ if self.tls_insecure and not force_verify:
537
537
  return ssl._create_unverified_context()
538
538
 
539
539
  if not self.tls_ca_bundle:
@@ -366,6 +366,24 @@ class BaseAttribute(FlagPropertyMixin, NodePropertyMixin, MetadataInterface):
366
366
 
367
367
  return await self._update(db=db, user_id=user_id, at=save_at)
368
368
 
369
+ def get_branch_for_delete(self) -> Branch:
370
+ """Get the appropriate branch for explicit attribute delete operations.
371
+
372
+ For branch-agnostic attributes on branch-aware nodes, use the current branch
373
+ to create branch-scoped deletion edges rather than global deletion.
374
+
375
+ Returns:
376
+ Branch: The branch to use for the delete operation
377
+ """
378
+ if (
379
+ self.schema.branch == BranchSupportType.AGNOSTIC
380
+ and self.node is not None
381
+ and self.node._schema.branch == BranchSupportType.AWARE
382
+ ):
383
+ return self.branch
384
+
385
+ return self.get_branch_based_on_support_type()
386
+
369
387
  async def delete(
370
388
  self, db: InfrahubDatabase, user_id: str = SYSTEM_USER_ID, at: Timestamp | None = None
371
389
  ) -> AttributeChangelog | None:
@@ -373,7 +391,7 @@ class BaseAttribute(FlagPropertyMixin, NodePropertyMixin, MetadataInterface):
373
391
  return None
374
392
 
375
393
  delete_at = Timestamp(at)
376
- branch = self.get_branch_based_on_support_type()
394
+ branch = self.get_branch_for_delete()
377
395
 
378
396
  query = await AttributeDeleteQuery.init(db=db, branch=branch, attr=self, user_id=user_id, at=delete_at)
379
397
  await query.execute(db=db)
@@ -642,9 +660,10 @@ class BaseAttribute(FlagPropertyMixin, NodePropertyMixin, MetadataInterface):
642
660
  hierarchy_level = branch.hierarchy_level
643
661
  if self.schema.branch == BranchSupportType.AGNOSTIC:
644
662
  branch = registry.get_global_branch()
663
+ hierarchy_level = 1
645
664
  elif self.schema.branch == BranchSupportType.LOCAL and self.node._schema.branch == BranchSupportType.AGNOSTIC:
646
665
  branch = registry.get_global_branch()
647
- hierarchy_level = 0
666
+ hierarchy_level = 1
648
667
  data = AttributeCreateData(
649
668
  uuid=str(UUIDT()),
650
669
  name=self.name,
@@ -1,5 +1,6 @@
1
1
  from __future__ import annotations
2
2
 
3
+ from copy import deepcopy
3
4
  from dataclasses import asdict, dataclass, field
4
5
  from enum import Enum
5
6
  from typing import TYPE_CHECKING, Any
@@ -269,6 +270,27 @@ class EnrichedDiffRelationship(BaseSummary):
269
270
  def __hash__(self) -> int:
270
271
  return hash(self.name)
271
272
 
273
+ def __deepcopy__(self, memo: dict[int, Any]) -> EnrichedDiffRelationship:
274
+ """Custom deepcopy to handle circular references with EnrichedDiffNode.
275
+
276
+ The default deepcopy can fail because it may call __hash__ on a partially
277
+ constructed instance (before 'name' is set) when handling circular references
278
+ through the nodes -> relationships cycle.
279
+
280
+ This implementation ensures 'name' is set and the instance is registered
281
+ in memo before deepcopying other attributes that may have circular references.
282
+ """
283
+ new_obj = object.__new__(EnrichedDiffRelationship)
284
+ # Set the hashable attribute first (required for __hash__)
285
+ new_obj.name = self.name
286
+ # Register in memo BEFORE copying other attributes to handle circular refs
287
+ memo[id(self)] = new_obj
288
+ # Deepcopy all other attributes
289
+ for key, value in self.__dict__.items():
290
+ if key != "name":
291
+ setattr(new_obj, key, deepcopy(value, memo))
292
+ return new_obj
293
+
272
294
  @property
273
295
  def num_properties(self) -> int:
274
296
  return sum(r.num_properties for r in self.relationships)
@@ -327,6 +349,27 @@ class EnrichedDiffNode(BaseSummary):
327
349
  def __hash__(self) -> int:
328
350
  return hash(self.identifier)
329
351
 
352
+ def __deepcopy__(self, memo: dict[int, Any]) -> EnrichedDiffNode:
353
+ """Custom deepcopy to handle circular references with EnrichedDiffRelationship.
354
+
355
+ The default deepcopy can fail because it may call __hash__ on a partially
356
+ constructed instance (before 'identifier' is set) when handling circular references
357
+ through the relationships -> nodes cycle.
358
+
359
+ This implementation ensures 'identifier' is set and the instance is registered
360
+ in memo before deepcopying other attributes that may have circular references.
361
+ """
362
+ new_obj = object.__new__(EnrichedDiffNode)
363
+ # Set the hashable attribute first (required for __hash__)
364
+ new_obj.identifier = deepcopy(self.identifier, memo)
365
+ # Register in memo BEFORE copying other attributes to handle circular refs
366
+ memo[id(self)] = new_obj
367
+ # Deepcopy all other attributes
368
+ for key, value in self.__dict__.items():
369
+ if key != "identifier":
370
+ setattr(new_obj, key, deepcopy(value, memo))
371
+ return new_obj
372
+
330
373
  @property
331
374
  def uuid(self) -> str:
332
375
  return self.identifier.uuid
@@ -1 +1 @@
1
- GRAPH_VERSION = 51
1
+ GRAPH_VERSION = 55
@@ -13,6 +13,8 @@ node_indexes: list[IndexItem] = [
13
13
  IndexItem(name="rel_uuid", label="Relationship", properties=["uuid"], type=IndexType.RANGE),
14
14
  IndexItem(name="rel_identifier", label="Relationship", properties=["name"], type=IndexType.RANGE),
15
15
  IndexItem(name="attr_value_indexed", label="AttributeValueIndexed", properties=["value"], type=IndexType.RANGE),
16
+ # TEXT index for efficient CONTAINS/STARTS WITH/ENDS WITH searches on attribute values
17
+ IndexItem(name="attr_value_indexed_text", label="AttributeValueIndexed", properties=["value"], type=IndexType.TEXT),
16
18
  # diff indices
17
19
  IndexItem(name="diff_uuid", label="DiffRoot", properties=["uuid"], type=IndexType.TEXT),
18
20
  IndexItem(name="diff_node_uuid", label="DiffNode", properties=["uuid"], type=IndexType.TEXT),
@@ -0,0 +1,229 @@
1
+ from __future__ import annotations
2
+
3
+ import ipaddress
4
+ from typing import TYPE_CHECKING, Iterable
5
+
6
+ from infrahub.core.query.ipam import (
7
+ IPAddressData,
8
+ IPPrefixData,
9
+ IPPrefixIPAddressFetch,
10
+ IPPrefixIPAddressFetchFree,
11
+ IPPrefixSubnetFetch,
12
+ IPPrefixSubnetFetchFree,
13
+ IPv6PrefixIPAddressFetchFree,
14
+ IPv6PrefixSubnetFetchFree,
15
+ )
16
+
17
+ if TYPE_CHECKING:
18
+ from infrahub.core.branch import Branch
19
+ from infrahub.core.ipam.constants import IPAddressType, IPNetworkType
20
+ from infrahub.core.node import Node
21
+ from infrahub.core.timestamp import Timestamp
22
+ from infrahub.database import InfrahubDatabase
23
+
24
+
25
class IPAMResourceAllocator:
    """Allocator for IPAM resources (prefixes and addresses) within pools.

    Provides optimized methods for finding the next available IP prefix or
    address within a parent prefix. The heavy lifting is pushed to
    database-side Cypher computation for performance, which matters most for
    IPv6 allocations where the address space is very large.
    """

    def __init__(
        self,
        db: InfrahubDatabase,
        namespace: Node | str | None = None,
        branch: Branch | None = None,
        branch_agnostic: bool = False,
    ) -> None:
        """Initialize the IPAM resource allocator.

        Args:
            db: Database connection to use for queries.
            namespace: IP namespace node or its ID. If None, uses the default namespace.
            branch: Branch to query. If None, uses the default branch.
            branch_agnostic: If True, queries across all branches.
        """
        self.db = db
        self.namespace = namespace
        self.branch = branch
        self.branch_agnostic = branch_agnostic

    async def _get_next_ipv4_prefix(
        self, ip_prefix: IPNetworkType, target_prefix_length: int, at: Timestamp | str | None = None
    ) -> IPNetworkType | None:
        """Get the next available free IPv4 prefix.

        Uses integer arithmetic in Cypher for efficient gap detection in the
        IPv4 address space.

        Args:
            ip_prefix: Parent prefix to allocate from.
            target_prefix_length: Desired prefix length for the new prefix (0-32).
            at: Optional timestamp for point-in-time queries.

        Returns:
            The next available IPv4 prefix, or None if no space is available
            or if the target_prefix_length is invalid.
        """
        # Reject lengths outside the IPv4 range or shorter than the parent
        # (a child prefix can never be larger than its parent).
        if target_prefix_length < 0 or target_prefix_length > 32:
            return None
        if target_prefix_length < ip_prefix.prefixlen:
            return None

        query = await IPPrefixSubnetFetchFree.init(
            db=self.db,
            branch=self.branch,
            obj=ip_prefix,
            target_prefixlen=target_prefix_length,
            namespace=self.namespace,
            at=at,
            branch_agnostic=self.branch_agnostic,
        )
        await query.execute(db=self.db)

        result_data = query.get_prefix_data()
        if not result_data:
            return None

        # free_start is the integer value of the first free network address.
        network_address = ipaddress.IPv4Address(result_data.free_start)
        return ipaddress.ip_network(f"{network_address}/{target_prefix_length}")

    async def _get_next_ipv6_prefix(
        self, ip_prefix: IPNetworkType, target_prefix_length: int, at: Timestamp | str | None = None
    ) -> IPNetworkType | None:
        """Get the next available free IPv6 prefix.

        Uses binary string operations in Cypher to handle IPv6's 128-bit
        address space, as integer values would overflow Neo4j's 64-bit
        integer type.

        Args:
            ip_prefix: Parent prefix to allocate from.
            target_prefix_length: Desired prefix length for the new prefix (0-128).
            at: Optional timestamp for point-in-time queries.

        Returns:
            The next available IPv6 prefix, or None if no space is available
            or if the target_prefix_length is invalid.
        """
        # Reject lengths outside the IPv6 range or shorter than the parent.
        if target_prefix_length < 0 or target_prefix_length > 128:
            return None
        if target_prefix_length < ip_prefix.prefixlen:
            return None

        query = await IPv6PrefixSubnetFetchFree.init(
            db=self.db,
            branch=self.branch,
            obj=ip_prefix,
            target_prefixlen=target_prefix_length,
            namespace=self.namespace,
            at=at,
            branch_agnostic=self.branch_agnostic,
        )
        await query.execute(db=self.db)

        result_data = query.get_prefix_data()
        if not result_data:
            return None

        # free_start_bin is a binary string; convert it to a 128-bit address.
        addr_int = int(result_data.free_start_bin, 2)
        network_address = ipaddress.IPv6Address(addr_int)
        return ipaddress.ip_network(f"{network_address}/{target_prefix_length}")

    async def get_next_prefix(
        self, ip_prefix: IPNetworkType, target_prefix_length: int, at: Timestamp | str | None = None
    ) -> IPNetworkType | None:
        """Get the next available free prefix of specified length within a parent prefix.

        Automatically selects the appropriate method based on IP version (IPv4 vs IPv6).

        Args:
            ip_prefix: Parent prefix to allocate from.
            target_prefix_length: Desired prefix length for the new prefix.
            at: Optional timestamp for point-in-time queries.

        Returns:
            The next available prefix, or None if no space is available.
        """
        if ip_prefix.version == 4:
            return await self._get_next_ipv4_prefix(
                ip_prefix=ip_prefix, target_prefix_length=target_prefix_length, at=at
            )
        return await self._get_next_ipv6_prefix(ip_prefix=ip_prefix, target_prefix_length=target_prefix_length, at=at)

    async def get_next_address(
        self, ip_prefix: IPNetworkType, at: Timestamp | str | None = None, is_pool: bool = False
    ) -> IPAddressType | None:
        """Get the next available free IP address within a prefix.

        Automatically selects the appropriate query based on IP version (IPv4 vs IPv6).

        Args:
            ip_prefix: Prefix to allocate an address from.
            at: Optional timestamp for point-in-time queries.
            is_pool: If True, includes network and broadcast addresses as allocatable.
                If False (default), reserves the first and last addresses.

        Returns:
            The next available IP address, or None if no addresses are available.
        """
        # Use the IPv6-specific query for IPv6 to avoid 64-bit integer overflow.
        query_class = IPv6PrefixIPAddressFetchFree if ip_prefix.version == 6 else IPPrefixIPAddressFetchFree
        query = await query_class.init(
            db=self.db,
            branch=self.branch,
            obj=ip_prefix,
            namespace=self.namespace,
            at=at,
            branch_agnostic=self.branch_agnostic,
            is_pool=is_pool,
        )
        await query.execute(db=self.db)
        return query.get_address()

    async def get_subnets(self, ip_prefix: IPNetworkType, at: Timestamp | str | None = None) -> Iterable[IPPrefixData]:
        """Get all subnets within a parent prefix.

        Args:
            ip_prefix: Parent prefix to query.
            at: Optional timestamp for point-in-time queries.

        Returns:
            Iterable of IPPrefixData objects representing child subnets.
        """
        query = await IPPrefixSubnetFetch.init(
            db=self.db,
            branch=self.branch,
            obj=ip_prefix,
            namespace=self.namespace,
            at=at,
            branch_agnostic=self.branch_agnostic,
        )
        await query.execute(db=self.db)
        return query.get_subnets()

    async def get_ip_addresses(
        self, ip_prefix: IPNetworkType, at: Timestamp | str | None = None
    ) -> Iterable[IPAddressData]:
        """Get all IP addresses within a prefix.

        Args:
            ip_prefix: Prefix to query.
            at: Optional timestamp for point-in-time queries.

        Returns:
            Iterable of IPAddressData objects representing addresses in the prefix.
        """
        query = await IPPrefixIPAddressFetch.init(
            db=self.db,
            branch=self.branch,
            obj=ip_prefix,
            namespace=self.namespace,
            at=at,
            branch_agnostic=self.branch_agnostic,
        )
        await query.execute(db=self.db)
        return query.get_addresses()
@@ -53,6 +53,10 @@ from .m048_undelete_rel_props import Migration048
53
53
  from .m049_remove_is_visible_relationship import Migration049
54
54
  from .m050_backfill_vertex_metadata import Migration050
55
55
  from .m051_subtract_branched_from_microsecond import Migration051
56
+ from .m052_fix_global_branch_level import Migration052
57
+ from .m053_fix_branch_level_zero import Migration053
58
+ from .m054_cleanup_orphaned_nodes import Migration054
59
+ from .m055_remove_webhook_validate_certificates_default import Migration055
56
60
 
57
61
  if TYPE_CHECKING:
58
62
  from ..shared import MigrationTypes
@@ -110,6 +114,10 @@ MIGRATIONS: list[type[MigrationTypes]] = [
110
114
  Migration049,
111
115
  Migration050,
112
116
  Migration051,
117
+ Migration052,
118
+ Migration053,
119
+ Migration054,
120
+ Migration055,
113
121
  ]
114
122
 
115
123
 
@@ -0,0 +1,51 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import TYPE_CHECKING, Any, Sequence
4
+
5
+ from infrahub.core.constants import GLOBAL_BRANCH_NAME
6
+ from infrahub.core.migrations.shared import GraphMigration, MigrationInput, MigrationResult
7
+ from infrahub.core.query import Query, QueryType
8
+
9
+ if TYPE_CHECKING:
10
+ from infrahub.database import InfrahubDatabase
11
+
12
+
13
class FixGlobalBranchLevelQuery(Query):
    """Update edges on the global branch to have branch_level = 1.

    Runs a batched write (CALL ... IN TRANSACTIONS) so very large graphs do
    not blow up a single transaction.
    """

    name = "fix_global_branch_level"
    type: QueryType = QueryType.WRITE
    insert_return = False
    raise_error_if_empty = False

    async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:  # noqa: ARG002
        # The global branch name is passed as a parameter so the match can use it.
        self.params["global_branch_name"] = GLOBAL_BRANCH_NAME

        query = """
        MATCH ()-[e {branch: $global_branch_name}]->()
        WHERE e.branch_level <> 1
        CALL (e) {
            SET e.branch_level = 1
        } IN TRANSACTIONS
        """
        self.add_to_query(query)
32
+
33
+
34
class Migration052(GraphMigration):
    """
    Fix edges on the global branch that have incorrect branch_level.

    Edges on the global branch ("-global-") should always have branch_level = 1.
    This migration corrects any edges that were incorrectly created with a
    different branch_level value (e.g., branch_level = 2 when created from a
    user branch).
    """

    name: str = "052_fix_global_branch_level"
    minimum_version: int = 51
    queries: Sequence[type[Query]] = [FixGlobalBranchLevelQuery]

    async def validate_migration(self, db: InfrahubDatabase) -> MigrationResult:  # noqa: ARG002
        # Nothing to validate: the fix is idempotent and safe to re-run.
        return MigrationResult()

    async def execute(self, migration_input: MigrationInput) -> MigrationResult:
        return await self.do_execute(migration_input=migration_input)
@@ -0,0 +1,61 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import TYPE_CHECKING, Any, Sequence
4
+
5
+ from infrahub.core.constants import GLOBAL_BRANCH_NAME
6
+ from infrahub.core.migrations.shared import GraphMigration, MigrationInput, MigrationResult
7
+ from infrahub.core.query import Query, QueryType
8
+
9
+ if TYPE_CHECKING:
10
+ from infrahub.database import InfrahubDatabase
11
+
12
+
13
class FixBranchLevelZeroQuery(Query):
    """Update edges with branch_level=0 to the correct branch_level.

    Edges on the global branch or default branch should have branch_level=1.
    Edges on other branches should have branch_level=2.
    """

    name = "fix_branch_level_zero"
    type: QueryType = QueryType.WRITE
    insert_return = False
    raise_error_if_empty = False

    async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:  # noqa: ARG002
        self.params["global_branch_name"] = GLOBAL_BRANCH_NAME

        # The default branch name is resolved from the Branch node in the graph;
        # the update itself is batched via CALL ... IN TRANSACTIONS.
        query = """
        MATCH (branch_node:Branch {is_default: true})
        WITH branch_node.name AS default_branch_name
        MATCH ()-[e]->()
        WHERE e.branch_level = 0
        CALL (e, default_branch_name) {
            SET e.branch_level = CASE
                WHEN e.branch = $global_branch_name OR e.branch = default_branch_name THEN 1
                ELSE 2
            END
        } IN TRANSACTIONS
        """
        self.add_to_query(query)
41
+
42
+
43
class Migration053(GraphMigration):
    """
    Fix edges with branch_level=0 to have the correct branch_level.

    Edges with branch_level=0 indicate a bug where the branch level was not
    properly set during creation. This migration fixes them:
    - Edges on the global branch ("-global-") or default branch: branch_level=1
    - Edges on other branches: branch_level=2
    """

    name: str = "053_fix_branch_level_zero"
    minimum_version: int = 52
    queries: Sequence[type[Query]] = [FixBranchLevelZeroQuery]

    async def validate_migration(self, db: InfrahubDatabase) -> MigrationResult:  # noqa: ARG002
        # Nothing to validate: the fix is idempotent and safe to re-run.
        return MigrationResult()

    async def execute(self, migration_input: MigrationInput) -> MigrationResult:
        return await self.do_execute(migration_input=migration_input)