infrahub-server 1.2.2__py3-none-any.whl → 1.2.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. infrahub/cli/git_agent.py +4 -10
  2. infrahub/computed_attribute/tasks.py +8 -8
  3. infrahub/config.py +35 -0
  4. infrahub/core/constants/__init__.py +1 -0
  5. infrahub/core/constraint/node/runner.py +6 -5
  6. infrahub/core/graph/__init__.py +1 -1
  7. infrahub/core/migrations/graph/__init__.py +6 -1
  8. infrahub/core/migrations/graph/m018_uniqueness_nulls.py +68 -70
  9. infrahub/core/migrations/graph/m024_missing_hierarchy_backfill.py +69 -0
  10. infrahub/core/migrations/graph/m025_uniqueness_nulls.py +26 -0
  11. infrahub/core/migrations/schema/node_attribute_remove.py +16 -2
  12. infrahub/core/models.py +7 -1
  13. infrahub/core/node/__init__.py +4 -4
  14. infrahub/core/node/constraints/grouped_uniqueness.py +30 -10
  15. infrahub/core/query/ipam.py +1 -1
  16. infrahub/core/registry.py +18 -0
  17. infrahub/core/schema/basenode_schema.py +21 -1
  18. infrahub/core/schema/definitions/internal.py +2 -1
  19. infrahub/core/schema/generated/base_node_schema.py +1 -1
  20. infrahub/core/schema/manager.py +21 -0
  21. infrahub/core/schema/schema_branch.py +17 -9
  22. infrahub/database/__init__.py +10 -0
  23. infrahub/events/group_action.py +6 -1
  24. infrahub/events/node_action.py +5 -1
  25. infrahub/git/integrator.py +9 -7
  26. infrahub/graphql/mutations/main.py +10 -12
  27. infrahub/menu/repository.py +6 -6
  28. infrahub/message_bus/messages/__init__.py +0 -2
  29. infrahub/message_bus/operations/__init__.py +0 -1
  30. infrahub/message_bus/operations/event/__init__.py +2 -2
  31. infrahub/server.py +6 -11
  32. infrahub/services/adapters/cache/__init__.py +17 -0
  33. infrahub/services/adapters/cache/redis.py +11 -1
  34. infrahub/services/adapters/message_bus/__init__.py +20 -0
  35. infrahub/services/component.py +1 -2
  36. infrahub/tasks/registry.py +3 -7
  37. infrahub/workers/infrahub_async.py +4 -10
  38. infrahub_sdk/client.py +6 -6
  39. infrahub_sdk/ctl/cli_commands.py +32 -37
  40. infrahub_sdk/ctl/render.py +39 -0
  41. infrahub_sdk/exceptions.py +6 -2
  42. infrahub_sdk/generator.py +1 -1
  43. infrahub_sdk/node.py +38 -11
  44. infrahub_sdk/protocols_base.py +8 -1
  45. infrahub_sdk/pytest_plugin/items/jinja2_transform.py +22 -26
  46. infrahub_sdk/schema/__init__.py +10 -1
  47. infrahub_sdk/store.py +351 -75
  48. infrahub_sdk/template/__init__.py +209 -0
  49. infrahub_sdk/template/exceptions.py +38 -0
  50. infrahub_sdk/template/filters.py +151 -0
  51. infrahub_sdk/template/models.py +10 -0
  52. infrahub_sdk/utils.py +7 -0
  53. {infrahub_server-1.2.2.dist-info → infrahub_server-1.2.4.dist-info}/METADATA +2 -1
  54. {infrahub_server-1.2.2.dist-info → infrahub_server-1.2.4.dist-info}/RECORD +61 -59
  55. infrahub_testcontainers/container.py +6 -0
  56. infrahub_testcontainers/docker-compose.test.yml +1 -0
  57. infrahub_testcontainers/haproxy.cfg +3 -3
  58. infrahub_testcontainers/helpers.py +1 -1
  59. infrahub/message_bus/messages/event_worker_newprimaryapi.py +0 -9
  60. infrahub/message_bus/operations/event/worker.py +0 -9
  61. infrahub/support/__init__.py +0 -0
  62. infrahub/support/macro.py +0 -69
  63. {infrahub_server-1.2.2.dist-info → infrahub_server-1.2.4.dist-info}/LICENSE.txt +0 -0
  64. {infrahub_server-1.2.2.dist-info → infrahub_server-1.2.4.dist-info}/WHEEL +0 -0
  65. {infrahub_server-1.2.2.dist-info → infrahub_server-1.2.4.dist-info}/entry_points.txt +0 -0
infrahub/cli/git_agent.py CHANGED
@@ -19,10 +19,8 @@ from infrahub.git import initialize_repositories_directory
  from infrahub.lock import initialize_lock
  from infrahub.log import get_logger
  from infrahub.services import InfrahubServices
- from infrahub.services.adapters.cache.nats import NATSCache
- from infrahub.services.adapters.cache.redis import RedisCache
- from infrahub.services.adapters.message_bus.nats import NATSMessageBus
- from infrahub.services.adapters.message_bus.rabbitmq import RabbitMQMessageBus
+ from infrahub.services.adapters.cache import InfrahubCache
+ from infrahub.services.adapters.message_bus import InfrahubMessageBus
  from infrahub.services.adapters.workflow.local import WorkflowLocalExecution
  from infrahub.services.adapters.workflow.worker import WorkflowWorkerExecution
  from infrahub.trace import configure_trace
@@ -121,13 +119,9 @@ async def start(

  component_type = ComponentType.GIT_AGENT
  message_bus = config.OVERRIDE.message_bus or (
- await NATSMessageBus.new(component_type=component_type)
- if config.SETTINGS.broker.driver == config.BrokerDriver.NATS
- else await RabbitMQMessageBus.new(component_type=component_type)
- )
- cache = config.OVERRIDE.cache or (
- await NATSCache.new() if config.SETTINGS.cache.driver == config.CacheDriver.NATS else RedisCache()
+ await InfrahubMessageBus.new_from_driver(component_type=component_type, driver=config.SETTINGS.broker.driver)
  )
+ cache = config.OVERRIDE.cache or (await InfrahubCache.new_from_driver(driver=config.SETTINGS.cache.driver))

  service = await InfrahubServices.new(
  cache=cache,
infrahub/computed_attribute/tasks.py CHANGED
@@ -6,6 +6,7 @@ from infrahub_sdk.protocols import (
  CoreNode, # noqa: TC002
  CoreTransformPython,
  )
+ from infrahub_sdk.template import Jinja2Template
  from prefect import flow
  from prefect.client.orchestration import get_client
  from prefect.logging import get_run_logger
@@ -16,7 +17,6 @@ from infrahub.core.registry import registry
  from infrahub.events import BranchDeletedEvent
  from infrahub.git.repository import get_initialized_repo
  from infrahub.services import InfrahubServices # noqa: TC001 needed for prefect flow
- from infrahub.support.macro import MacroDefinition
  from infrahub.trigger.models import TriggerType
  from infrahub.trigger.setup import setup_triggers
  from infrahub.workflows.catalogue import (
@@ -173,15 +173,15 @@ async def update_computed_attribute_value_jinja2(

  await add_tags(branches=[branch_name], nodes=[obj.id], db_change=True)

- macro_definition = MacroDefinition(macro=template_value)
- my_filter = {}
- for variable in macro_definition.variables:
+ jinja_template = Jinja2Template(template=template_value)
+ variables = {}
+ for variable in jinja_template.get_variables():
  components = variable.split("__")
  if len(components) == 2:
  property_name = components[0]
  property_value = components[1]
  attribute_property = getattr(obj, property_name)
- my_filter[variable] = getattr(attribute_property, property_value)
+ variables[variable] = getattr(attribute_property, property_value)
  elif len(components) == 3:
  relationship_name = components[0]
  property_name = components[1]
@@ -189,11 +189,11 @@ async def update_computed_attribute_value_jinja2(
  relationship = getattr(obj, relationship_name)
  try:
  attribute_property = getattr(relationship.peer, property_name)
- my_filter[variable] = getattr(attribute_property, property_value)
+ variables[variable] = getattr(attribute_property, property_value)
  except ValueError:
- my_filter[variable] = ""
+ variables[variable] = ""

- value = macro_definition.render(variables=my_filter)
+ value = await jinja_template.render(variables=variables)
  existing_value = getattr(obj, attribute_name).value
  if value == existing_value:
  log.debug(f"Ignoring to update {obj} with existing value on {attribute_name}={value}")
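The `__`-delimited variable names in the loop above encode where each template value comes from: two components name an attribute and a property on the node itself (for example `name__value`), while three components walk a relationship to the peer's attribute (for example `device__name__value`). A minimal sketch of that resolution rule; the function and the example names are illustrative, not part of the package:

    def resolve_variable(obj, variable: str):
        # Mirrors the branches above: 2 parts -> attribute property on the
        # node, 3 parts -> attribute property on a relationship's peer.
        components = variable.split("__")
        if len(components) == 2:
            attribute_name, property_name = components
            return getattr(getattr(obj, attribute_name), property_name)
        if len(components) == 3:
            relationship_name, attribute_name, property_name = components
            peer = getattr(obj, relationship_name).peer
            return getattr(getattr(peer, attribute_name), property_name)
        raise ValueError(f"unsupported variable: {variable}")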
infrahub/config.py CHANGED
@@ -115,11 +115,43 @@ class BrokerDriver(str, Enum):
  RabbitMQ = "rabbitmq"
  NATS = "nats"

+ @property
+ def driver_module_path(self) -> str:
+ match self:
+ case BrokerDriver.NATS:
+ return "infrahub.services.adapters.message_bus.nats"
+ case BrokerDriver.RabbitMQ:
+ return "infrahub.services.adapters.message_bus.rabbitmq"
+
+ @property
+ def driver_class_name(self) -> str:
+ match self:
+ case BrokerDriver.NATS:
+ return "NATSMessageBus"
+ case BrokerDriver.RabbitMQ:
+ return "RabbitMQMessageBus"
+

  class CacheDriver(str, Enum):
  Redis = "redis"
  NATS = "nats"

+ @property
+ def driver_module_path(self) -> str:
+ match self:
+ case CacheDriver.NATS:
+ return "infrahub.services.adapters.cache.nats"
+ case CacheDriver.Redis:
+ return "infrahub.services.adapters.cache.redis"
+
+ @property
+ def driver_class_name(self) -> str:
+ match self:
+ case CacheDriver.NATS:
+ return "NATSCache"
+ case CacheDriver.Redis:
+ return "RedisCache"
+

  class WorkflowDriver(str, Enum):
  LOCAL = "local"
@@ -612,6 +644,9 @@ class SecuritySettings(BaseSettings):
  oauth2_provider_settings: SecurityOAuth2ProviderSettings = Field(default_factory=SecurityOAuth2ProviderSettings)
  oidc_providers: list[OIDCProvider] = Field(default_factory=list, description="The selected OIDC providers")
  oidc_provider_settings: SecurityOIDCProviderSettings = Field(default_factory=SecurityOIDCProviderSettings)
+ restrict_untrusted_jinja2_filters: bool = Field(
+ default=True, description="Indicates if untrusted Jinja2 filters should be disallowed for computed attributes"
+ )
  _oauth2_settings: dict[str, SecurityOAuth2Settings] = PrivateAttr(default_factory=dict)
  _oidc_settings: dict[str, SecurityOIDCSettings] = PrivateAttr(default_factory=dict)
  sso_user_default_group: str | None = Field(
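The new driver properties pair with the `InfrahubMessageBus.new_from_driver` and `InfrahubCache.new_from_driver` constructors called from git_agent.py above. Those constructors live in infrahub/services/adapters/message_bus/__init__.py (+20) and infrahub/services/adapters/cache/__init__.py (+17), which this diff does not expand, so the following is only a sketch of their likely shape, assuming a dynamic import keyed on the two properties:

    import importlib

    class InfrahubMessageBus:
        @classmethod
        async def new_from_driver(cls, component_type, driver):
            # Resolve the concrete adapter (NATSMessageBus or RabbitMQMessageBus)
            # from the module path and class name exposed by the BrokerDriver enum.
            module = importlib.import_module(driver.driver_module_path)
            adapter_class = getattr(module, driver.driver_class_name)
            # Both message bus adapters already expose an async new() constructor,
            # as seen in the code removed from git_agent.py.
            return await adapter_class.new(component_type=component_type)

Resolving the adapter lazily would also explain why the eager NATS/RabbitMQ imports were dropped from git_agent.py: only the configured driver's module needs to be imported at runtime.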
infrahub/core/constants/__init__.py CHANGED
@@ -348,6 +348,7 @@ RESTRICTED_NAMESPACES: list[str] = [
  NODE_NAME_REGEX = r"^[A-Z][a-zA-Z0-9]+$"
  DEFAULT_NAME_MIN_LENGTH = 2
  NAME_REGEX = r"^[a-z0-9\_]+$"
+ NAME_REGEX_OR_EMPTY = r"^[a-z0-9\_]*$"
  DEFAULT_DESCRIPTION_LENGTH = 128

  DEFAULT_NAME_MAX_LENGTH = 32
infrahub/core/constraint/node/runner.py CHANGED
@@ -23,10 +23,15 @@ class NodeConstraintRunner:
  self.uniqueness_constraint = uniqueness_constraint
  self.relationship_manager_constraints = relationship_manager_constraints

- async def check(self, node: Node, field_filters: list[str] | None = None) -> None:
+ async def check(
+ self, node: Node, field_filters: list[str] | None = None, skip_uniqueness_check: bool = False
+ ) -> None:
  async with self.db.start_session() as db:
  await node.resolve_relationships(db=db)

+ if not skip_uniqueness_check:
+ await self.uniqueness_constraint.check(node, filters=field_filters)
+
  for relationship_name in node.get_schema().relationship_names:
  if field_filters and relationship_name not in field_filters:
  continue
@@ -34,7 +39,3 @@ class NodeConstraintRunner:
  await relationship_manager.fetch_relationship_ids(db=db, force_refresh=True)
  for relationship_constraint in self.relationship_manager_constraints:
  await relationship_constraint.check(relm=relationship_manager, node_schema=node.get_schema())
-
- # If HFID constraint is the only constraint violated, all other constraints need to have ran before,
- # as it means there is an existing node that we might want to update in the case of an upsert
- await self.uniqueness_constraint.check(node, filters=field_filters)
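With this reordering, the uniqueness check now runs before the relationship constraints and can be skipped on a second pass. One plausible upsert pattern (a hedged sketch; the real caller is in the mutation layer, and `constraint_runner`, `load_node`, and the update step are assumed names, not package code):

    try:
        await constraint_runner.check(node=node, field_filters=fields)
    except HFIDViolatedError as exc:
        # The HFID violation identifies the existing node to update instead.
        existing = await load_node(next(iter(exc.matching_nodes_ids)))
        # ...apply the incoming values to the existing node...
        await constraint_runner.check(node=existing, field_filters=fields, skip_uniqueness_check=True)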
infrahub/core/graph/__init__.py CHANGED
@@ -1 +1 @@
- GRAPH_VERSION = 22
+ GRAPH_VERSION = 25
infrahub/core/migrations/graph/__init__.py CHANGED
@@ -24,6 +24,9 @@ from .m019_restore_rels_to_time import Migration019
  from .m020_duplicate_edges import Migration020
  from .m021_missing_hierarchy_merge import Migration021
  from .m022_add_generate_template_attr import Migration022
+ from .m023_deduplicate_cardinality_one_relationships import Migration023
+ from .m024_missing_hierarchy_backfill import Migration024
+ from .m025_uniqueness_nulls import Migration025

  if TYPE_CHECKING:
  from infrahub.core.root import Root
@@ -53,7 +56,9 @@ MIGRATIONS: list[type[GraphMigration | InternalSchemaMigration | ArbitraryMigrat
  Migration020,
  Migration021,
  Migration022,
- # Migration023, Enable this migration once it has been tested on bigger databases
+ Migration023,
+ Migration024,
+ Migration025,
  ]

infrahub/core/migrations/graph/m018_uniqueness_nulls.py CHANGED
@@ -21,81 +21,79 @@ if TYPE_CHECKING:
  log = get_logger()


+ async def validate_nulls_in_uniqueness_constraints(db: InfrahubDatabase) -> MigrationResult:
+ """
+ Validate any schema that include optional attributes in the uniqueness constraints
+
+ An update to uniqueness constraint validation now handles NULL values as unique instead of ignoring them
+ """
+
+ default_branch = registry.get_branch_from_registry()
+ build_component_registry()
+ component_registry = get_component_registry()
+ uniqueness_checker = await component_registry.get_component(UniquenessChecker, db=db, branch=default_branch)
+ non_unique_nodes_by_kind: dict[str, list[NonUniqueNode]] = defaultdict(list)
+
+ manager = SchemaManager()
+ registry.schema = manager
+ internal_schema_root = SchemaRoot(**internal_schema)
+ manager.register_schema(schema=internal_schema_root)
+ schema_branch = await manager.load_schema_from_db(db=db, branch=default_branch)
+ manager.set_schema_branch(name=default_branch.name, schema=schema_branch)
+
+ for schema_kind in schema_branch.node_names + schema_branch.generic_names_without_templates:
+ schema = schema_branch.get(name=schema_kind, duplicate=False)
+ if not isinstance(schema, NodeSchema | GenericSchema):
+ continue
+
+ uniqueness_constraint_paths = schema.get_unique_constraint_schema_attribute_paths(schema_branch=schema_branch)
+ includes_optional_attr: bool = False
+
+ for uniqueness_constraint_path in uniqueness_constraint_paths:
+ for schema_attribute_path in uniqueness_constraint_path.attributes_paths:
+ if schema_attribute_path.attribute_schema and schema_attribute_path.attribute_schema.optional is True:
+ includes_optional_attr = True
+ break
+
+ if not includes_optional_attr:
+ continue
+
+ non_unique_nodes = await uniqueness_checker.check_one_schema(schema=schema)
+ if non_unique_nodes:
+ non_unique_nodes_by_kind[schema_kind] = non_unique_nodes
+
+ if not non_unique_nodes_by_kind:
+ return MigrationResult()
+
+ error_strings = []
+ for schema_kind, non_unique_nodes in non_unique_nodes_by_kind.items():
+ display_label_map = await get_display_labels_per_kind(
+ db=db, kind=schema_kind, branch_name=default_branch.name, ids=[nun.node_id for nun in non_unique_nodes]
+ )
+ for non_unique_node in non_unique_nodes:
+ display_label = display_label_map.get(non_unique_node.node_id)
+ error_str = f"{display_label or ''}({non_unique_node.node_schema.kind} / {non_unique_node.node_id})"
+ error_str += " violates uniqueness constraints for the following attributes: "
+ attr_values = [
+ f"{attr.attribute_name}={attr.attribute_value}" for attr in non_unique_node.non_unique_attributes
+ ]
+ error_str += ", ".join(attr_values)
+ error_strings.append(error_str)
+ if error_strings:
+ error_str = "For the following nodes, you must update the uniqueness_constraints on the schema of the node"
+ error_str += " to remove the attribute(s) with NULL values or update the data on the nodes to be unique"
+ error_str += " now that NULL values are considered during uniqueness validation"
+ return MigrationResult(errors=[error_str] + error_strings)
+ return MigrationResult()
+
+
  class Migration018(InternalSchemaMigration):
  name: str = "018_validate_nulls_in_uniqueness_constraints"
  minimum_version: int = 17
  migrations: Sequence[SchemaMigration] = []

  async def validate_migration(self, db: InfrahubDatabase) -> MigrationResult: # noqa: ARG002
- result = MigrationResult()
-
- return result
+ return MigrationResult()

  async def execute(self, db: InfrahubDatabase) -> MigrationResult:
- """
- Validate any schema that include optional attributes in the uniqueness constraints
-
- An update to uniqueness constraint validation now handles NULL values as unique instead of ignoring them
- """
- default_branch = registry.get_branch_from_registry()
- build_component_registry()
- component_registry = get_component_registry()
- uniqueness_checker = await component_registry.get_component(UniquenessChecker, db=db, branch=default_branch)
- non_unique_nodes_by_kind: dict[str, list[NonUniqueNode]] = defaultdict(list)
-
- manager = SchemaManager()
- registry.schema = manager
- internal_schema_root = SchemaRoot(**internal_schema)
- manager.register_schema(schema=internal_schema_root)
- schema_branch = await manager.load_schema_from_db(db=db, branch=default_branch)
- manager.set_schema_branch(name=default_branch.name, schema=schema_branch)
-
- for schema_kind in schema_branch.node_names + schema_branch.generic_names_without_templates:
- schema = schema_branch.get(name=schema_kind, duplicate=False)
- if not isinstance(schema, NodeSchema | GenericSchema):
- continue
-
- uniqueness_constraint_paths = schema.get_unique_constraint_schema_attribute_paths(
- schema_branch=schema_branch
- )
- includes_optional_attr: bool = False
-
- for uniqueness_constraint_path in uniqueness_constraint_paths:
- for schema_attribute_path in uniqueness_constraint_path.attributes_paths:
- if (
- schema_attribute_path.attribute_schema
- and schema_attribute_path.attribute_schema.optional is True
- ):
- includes_optional_attr = True
- break
-
- if not includes_optional_attr:
- continue
-
- non_unique_nodes = await uniqueness_checker.check_one_schema(schema=schema)
- if non_unique_nodes:
- non_unique_nodes_by_kind[schema_kind] = non_unique_nodes
-
- if not non_unique_nodes_by_kind:
- return MigrationResult()
-
- error_strings = []
- for schema_kind, non_unique_nodes in non_unique_nodes_by_kind.items():
- display_label_map = await get_display_labels_per_kind(
- db=db, kind=schema_kind, branch_name=default_branch.name, ids=[nun.node_id for nun in non_unique_nodes]
- )
- for non_unique_node in non_unique_nodes:
- display_label = display_label_map.get(non_unique_node.node_id)
- error_str = f"{display_label or ''}({non_unique_node.node_schema.kind} / {non_unique_node.node_id})"
- error_str += " violates uniqueness constraints for the following attributes: "
- attr_values = [
- f"{attr.attribute_name}={attr.attribute_value}" for attr in non_unique_node.non_unique_attributes
- ]
- error_str += ", ".join(attr_values)
- error_strings.append(error_str)
- if error_strings:
- error_str = "For the following nodes, you must update the uniqueness_constraints on the schema of the node"
- error_str += " to remove the attribute(s) with NULL values or update the data on the nodes to be unique"
- error_str += " now that NULL values are considered during uniqueness validation"
- return MigrationResult(errors=[error_str] + error_strings)
- return MigrationResult()
+ return await validate_nulls_in_uniqueness_constraints(db=db)
infrahub/core/migrations/graph/m024_missing_hierarchy_backfill.py CHANGED
@@ -0,0 +1,69 @@
+ from __future__ import annotations
+
+ from typing import TYPE_CHECKING, Any, Sequence
+
+ from infrahub.core import registry
+ from infrahub.core.initialization import initialization
+ from infrahub.core.migrations.shared import GraphMigration, MigrationResult
+ from infrahub.lock import initialize_lock
+ from infrahub.log import get_logger
+
+ from ...query import Query, QueryType
+
+ if TYPE_CHECKING:
+ from infrahub.database import InfrahubDatabase
+
+ log = get_logger()
+
+
+ class BackfillMissingHierarchyQuery(Query):
+ name = "backfill_missing_hierarchy"
+ type = QueryType.WRITE
+ insert_return = False
+
+ async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None: # noqa: ARG002
+ # load schemas from database into registry
+ initialize_lock()
+ await initialization(db=db)
+ kind_hierarchy_map: dict[str, str] = {}
+ schema_branch = await registry.schema.load_schema_from_db(db=db)
+ for node_schema_kind in schema_branch.node_names:
+ node_schema = schema_branch.get_node(name=node_schema_kind, duplicate=False)
+ if node_schema.hierarchy:
+ kind_hierarchy_map[node_schema.kind] = node_schema.hierarchy
+
+ self.params = {"hierarchy_map": kind_hierarchy_map}
+ query = """
+ MATCH (r:Root)
+ WITH r.default_branch AS default_branch
+ MATCH (rel:Relationship {name: "parent__child"})-[e:IS_RELATED]-(n:Node)
+ WHERE e.hierarchy IS NULL
+ WITH DISTINCT rel, n, default_branch
+ CALL {
+ WITH rel, n, default_branch
+ MATCH (rel)-[e:IS_RELATED {branch: default_branch}]-(n)
+ RETURN e
+ ORDER BY e.from DESC
+ LIMIT 1
+ }
+ WITH rel, n, e
+ WHERE e.status = "active" AND e.hierarchy IS NULL
+ SET e.hierarchy = $hierarchy_map[n.kind]
+ """
+ self.add_to_query(query)
+
+
+ class Migration024(GraphMigration):
+ """
+ A bug in diff merge logic caused the hierarchy information on IS_RELATED edges to be lost when merged into
+ main. This migration backfills the missing hierarchy data and accounts for the case when the branch that
+ created the data has been deleted.
+ """
+
+ name: str = "024_backfill_hierarchy"
+ minimum_version: int = 23
+ queries: Sequence[type[Query]] = [BackfillMissingHierarchyQuery]
+
+ async def validate_migration(self, db: InfrahubDatabase) -> MigrationResult: # noqa: ARG002
+ result = MigrationResult()
+ return result
infrahub/core/migrations/graph/m025_uniqueness_nulls.py CHANGED
@@ -0,0 +1,26 @@
+ from __future__ import annotations
+
+ from typing import TYPE_CHECKING, Sequence
+
+ from infrahub.core.migrations.shared import MigrationResult
+ from infrahub.log import get_logger
+
+ from ..shared import InternalSchemaMigration, SchemaMigration
+ from .m018_uniqueness_nulls import validate_nulls_in_uniqueness_constraints
+
+ if TYPE_CHECKING:
+ from infrahub.database import InfrahubDatabase
+
+ log = get_logger()
+
+
+ class Migration025(InternalSchemaMigration):
+ name: str = "025_validate_nulls_in_uniqueness_constraints"
+ minimum_version: int = 24
+ migrations: Sequence[SchemaMigration] = []
+
+ async def validate_migration(self, db: InfrahubDatabase) -> MigrationResult: # noqa: ARG002
+ return MigrationResult()
+
+ async def execute(self, db: InfrahubDatabase) -> MigrationResult:
+ return await validate_nulls_in_uniqueness_constraints(db=db)
infrahub/core/migrations/schema/node_attribute_remove.py CHANGED
@@ -4,6 +4,7 @@ from typing import TYPE_CHECKING, Any, Sequence

  from infrahub.core.constants import RelationshipStatus
  from infrahub.core.graph.schema import GraphAttributeRelationships
+ from infrahub.core.schema.generic_schema import GenericSchema

  from ..query import AttributeMigrationQuery
  from ..shared import AttributeSchemaMigration
@@ -22,8 +23,20 @@ class NodeAttributeRemoveMigrationQuery01(AttributeMigrationQuery):
  branch_filter, branch_params = self.branch.get_query_filter_path(at=self.at.to_string())
  self.params.update(branch_params)

+ attr_name = self.migration.schema_path.field_name
+ kinds_to_ignore = []
+ if isinstance(self.migration.new_node_schema, GenericSchema) and attr_name is not None:
+ for inheriting_schema_kind in self.migration.new_node_schema.used_by:
+ node_schema = db.schema.get_node_schema(
+ name=inheriting_schema_kind, branch=self.branch, duplicate=False
+ )
+ attr_schema = node_schema.get_attribute_or_none(name=attr_name)
+ if attr_schema and not attr_schema.inherited:
+ kinds_to_ignore.append(inheriting_schema_kind)
+
  self.params["node_kind"] = self.migration.new_schema.kind
- self.params["attr_name"] = self.migration.schema_path.field_name
+ self.params["kinds_to_ignore"] = kinds_to_ignore
+ self.params["attr_name"] = attr_name
  self.params["current_time"] = self.at.to_string()
  self.params["branch_name"] = self.branch.name
  self.params["branch_support"] = self.migration.previous_attribute_schema.get_branch().value
@@ -60,7 +73,8 @@ class NodeAttributeRemoveMigrationQuery01(AttributeMigrationQuery):
  query = """
  // Find all the active nodes
  MATCH (node:%(node_kind)s)
- WHERE exists((node)-[:HAS_ATTRIBUTE]-(:Attribute { name: $attr_name }))
+ WHERE (size($kinds_to_ignore) = 0 OR NOT any(l IN labels(node) WHERE l IN $kinds_to_ignore))
+ AND exists((node)-[:HAS_ATTRIBUTE]-(:Attribute { name: $attr_name }))
  CALL {
  WITH node
  MATCH (root:Root)<-[r:IS_PART_OF]-(node)
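The added `$kinds_to_ignore` predicate prevents the removal query from touching nodes whose concrete schema declares the attribute itself rather than inheriting it from the generic being migrated. A runnable Python analogue of the Cypher condition, with a hypothetical label set:

    # Mirrors: size($kinds_to_ignore) = 0 OR NOT any(l IN labels(node) WHERE l IN $kinds_to_ignore)
    labels = ["Node", "InfraDevice"]   # hypothetical labels on a graph node
    kinds_to_ignore = ["InfraDevice"]  # kinds that declare the attribute locally
    selected = len(kinds_to_ignore) == 0 or not any(label in kinds_to_ignore for label in labels)
    assert selected is False  # the node is excluded, so its attribute survives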
infrahub/core/models.py CHANGED
@@ -19,6 +19,8 @@ if TYPE_CHECKING:
  from infrahub.core.schema import MainSchemaTypes
  from infrahub.core.schema.schema_branch import SchemaBranch

+ GENERIC_ATTRIBUTES_TO_IGNORE = ["namespace", "name", "branch"]
+

  class NodeKind(BaseModel):
  namespace: str
@@ -270,6 +272,10 @@ class SchemaUpdateValidationResult(BaseModel):
  field_info = schema.model_fields[node_field_name]
  field_update = str(field_info.json_schema_extra.get("update")) # type: ignore[union-attr]

+ # No need to execute a migration for generic nodes attributes because they are not stored in the database
+ if schema.is_generic_schema and node_field_name in GENERIC_ATTRIBUTES_TO_IGNORE:
+ return
+
  schema_path = SchemaPath( # type: ignore[call-arg]
  schema_kind=schema.kind,
  path_type=SchemaPathType.NODE,
@@ -523,7 +529,7 @@ class HashableModel(BaseModel):

  return new_list

- def update(self, other: Self) -> Self:
+ def update(self, other: HashableModel) -> Self:
  """Update the current object with the new value from the new one if they are defined.

  Currently this method works for the following type of fields
infrahub/core/node/__init__.py CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
  from enum import Enum
  from typing import TYPE_CHECKING, Any, Sequence, TypeVar, overload

+ from infrahub_sdk.template import Jinja2Template
  from infrahub_sdk.utils import is_valid_uuid
  from infrahub_sdk.uuidt import UUIDT

@@ -24,7 +25,6 @@ from infrahub.core.query.node import NodeCheckIDQuery, NodeCreateAllQuery, NodeD
  from infrahub.core.schema import AttributeSchema, NodeSchema, ProfileSchema, RelationshipSchema, TemplateSchema
  from infrahub.core.timestamp import Timestamp
  from infrahub.exceptions import InitializationError, NodeNotFoundError, PoolExhaustedError, ValidationError
- from infrahub.support.macro import MacroDefinition
  from infrahub.types import ATTRIBUTE_TYPES

  from ...graphql.constants import KIND_GRAPHQL_FIELD_NAME
@@ -458,9 +458,9 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
  ValidationError({macro: f"{macro} is missing computational_logic for macro ({attr_schema.kind})"})
  )
  continue
- macro_definition = MacroDefinition(macro=attr_schema.computed_attribute.jinja2_template)

- for variable in macro_definition.variables:
+ jinja_template = Jinja2Template(template=attr_schema.computed_attribute.jinja2_template)
+ for variable in jinja_template.get_variables():
  attribute_path = schema_branch.validate_schema_path(
  node_schema=self._schema, path=variable, allowed_path_types=allowed_path_types
  )
@@ -487,7 +487,7 @@ class Node(BaseNode, metaclass=BaseNodeMeta):
  )
  variables[variable] = attribute

- content = macro_definition.render(variables=variables)
+ content = await jinja_template.render(variables=variables)

  generator_method_name = "_generate_attribute_default"
  if hasattr(self, f"generate_{attr_schema.name}"):
infrahub/core/node/constraints/grouped_uniqueness.py CHANGED
@@ -220,21 +220,41 @@ class NodeGroupedUniquenessConstraint(NodeConstraintInterface):

  violations = []
  for schema in schemas_to_check:
+ schema_filters = list(filters) if filters is not None else []
+ for attr_schema in schema.attributes:
+ if attr_schema.optional and attr_schema.unique and attr_schema.name not in schema_filters:
+ schema_filters.append(attr_schema.name)
+
  schema_violations = await self._get_single_schema_violations(
- node=node, node_schema=schema, at=at, filters=filters
+ node=node, node_schema=schema, at=at, filters=schema_filters
  )
  violations.extend(schema_violations)

- is_hfid_violated = any(violation.typ == UniquenessConstraintType.HFID for violation in violations)
+ hfid_violations = [violation for violation in violations if violation.typ == UniquenessConstraintType.HFID]
+ hfid_violation = hfid_violations[0] if len(hfid_violations) > 0 else None

- for violation in violations:
- if violation.typ == UniquenessConstraintType.STANDARD or (
- violation.typ == UniquenessConstraintType.SUBSET_OF_HFID and not is_hfid_violated
- ):
- error_msg = f"Violates uniqueness constraint '{'-'.join(violation.fields)}'"
- raise ValidationError(error_msg)
+ # If there are both a hfid violation and another one, in case of an upsert, we still want to update the node in case other violations are:
+ # - either on subset fields of hfid, which would be necessarily violated too
+ # - or on uniqueness constraints with a matching node id being the id of the hfid violation

  for violation in violations:
  if violation.typ == UniquenessConstraintType.HFID:
- error_msg = f"Violates uniqueness constraint '{'-'.join(violation.fields)}'"
- raise HFIDViolatedError(error_msg, matching_nodes_ids=violation.nodes_ids)
+ continue
+
+ if hfid_violation:
+ if violation.typ == UniquenessConstraintType.SUBSET_OF_HFID:
+ continue
+
+ if (
+ violation.typ == UniquenessConstraintType.STANDARD
+ and len(violation.nodes_ids) == 1
+ and next(iter(violation.nodes_ids)) == next(iter(hfid_violation.nodes_ids))
+ ):
+ continue
+
+ error_msg = f"Violates uniqueness constraint '{'-'.join(violation.fields)}'"
+ raise ValidationError(error_msg)
+
+ if hfid_violation:
+ error_msg = f"Violates uniqueness constraint '{'-'.join(hfid_violation.fields)}'"
+ raise HFIDViolatedError(error_msg, matching_nodes_ids=hfid_violation.nodes_ids)
infrahub/core/query/ipam.py CHANGED
@@ -362,7 +362,7 @@ class IPPrefixReconcileQuery(Query):
  # possible prefix: highest possible prefix length for a match
  possible_prefix_map: dict[str, int] = {}
  start_prefixlen = prefixlen if is_address else prefixlen - 1
- for max_prefix_len in range(start_prefixlen, 0, -1):
+ for max_prefix_len in range(start_prefixlen, -1, -1):
  tmp_prefix = prefix_bin_host[:max_prefix_len]
  possible_prefix = tmp_prefix.ljust(self.ip_value.max_prefixlen, "0")
  if possible_prefix not in possible_prefix_map:
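The one-character fix above changes the stop value of the reverse range so that prefix length 0 is also tried, which lets an all-zeros candidate (the default route, 0.0.0.0/0 or ::/0) be considered as a possible parent prefix:

    # range() stop is exclusive: stopping at 0 skips prefix length 0 entirely.
    assert list(range(3, 0, -1)) == [3, 2, 1]
    assert list(range(3, -1, -1)) == [3, 2, 1, 0]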
infrahub/core/registry.py CHANGED
@@ -220,5 +220,23 @@ class Registry:
  and branch.active_schema_hash.main != default_branch.active_schema_hash.main
  ]

+ async def purge_inactive_branches(
+ self, db: InfrahubDatabase, active_branches: list[Branch] | None = None
+ ) -> list[str]:
+ """Return a list of branches that were purged from the registry."""
+ active_branches = active_branches or await self.branch_object.get_list(db=db)
+ active_branch_names = [branch.name for branch in active_branches]
+ purged_branches: set[str] = set()
+
+ for branch_name in list(registry.branch.keys()):
+ if branch_name not in active_branch_names:
+ del registry.branch[branch_name]
+ purged_branches.add(branch_name)
+
+ purged_branches.update(self.schema.purge_inactive_branches(active_branches=active_branch_names))
+ purged_branches.update(db.purge_inactive_schemas(active_branches=active_branch_names))
+
+ return sorted(purged_branches)
+

  registry = Registry()
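A hypothetical caller for the new method (`cleanup_registry` is an illustrative name; infrahub/tasks/registry.py is also updated in this release and is a plausible home for this kind of reconciliation):

    from infrahub.core.registry import registry
    from infrahub.log import get_logger

    log = get_logger()

    async def cleanup_registry(db) -> None:
        # Drop in-memory state for branches that no longer exist in the database.
        purged = await registry.purge_inactive_branches(db=db)
        if purged:
            log.info(f"Purged inactive branches from the registry: {purged}")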