infrahub-server 1.5.0b1__py3-none-any.whl → 1.5.1__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. The information is provided for informational purposes only.
Files changed (171)
  1. infrahub/api/dependencies.py +4 -13
  2. infrahub/api/internal.py +2 -0
  3. infrahub/api/oauth2.py +13 -19
  4. infrahub/api/oidc.py +15 -21
  5. infrahub/api/schema.py +24 -3
  6. infrahub/api/transformation.py +22 -20
  7. infrahub/artifacts/models.py +2 -1
  8. infrahub/auth.py +137 -3
  9. infrahub/cli/__init__.py +2 -0
  10. infrahub/cli/db.py +158 -155
  11. infrahub/cli/dev.py +118 -0
  12. infrahub/cli/tasks.py +46 -0
  13. infrahub/cli/upgrade.py +56 -9
  14. infrahub/computed_attribute/tasks.py +20 -8
  15. infrahub/core/attribute.py +10 -2
  16. infrahub/core/branch/enums.py +1 -1
  17. infrahub/core/branch/models.py +7 -3
  18. infrahub/core/branch/tasks.py +68 -7
  19. infrahub/core/constants/__init__.py +3 -0
  20. infrahub/core/diff/calculator.py +2 -2
  21. infrahub/core/diff/query/artifact.py +1 -0
  22. infrahub/core/diff/query/delete_query.py +9 -5
  23. infrahub/core/diff/query/field_summary.py +1 -0
  24. infrahub/core/diff/query/merge.py +39 -23
  25. infrahub/core/graph/__init__.py +1 -1
  26. infrahub/core/initialization.py +5 -2
  27. infrahub/core/migrations/__init__.py +3 -0
  28. infrahub/core/migrations/exceptions.py +4 -0
  29. infrahub/core/migrations/graph/__init__.py +12 -13
  30. infrahub/core/migrations/graph/load_schema_branch.py +21 -0
  31. infrahub/core/migrations/graph/m013_convert_git_password_credential.py +1 -1
  32. infrahub/core/migrations/graph/m037_index_attr_vals.py +11 -30
  33. infrahub/core/migrations/graph/m039_ipam_reconcile.py +9 -7
  34. infrahub/core/migrations/graph/m040_duplicated_attributes.py +81 -0
  35. infrahub/core/migrations/graph/m041_deleted_dup_edges.py +149 -0
  36. infrahub/core/migrations/graph/m042_profile_attrs_in_db.py +147 -0
  37. infrahub/core/migrations/graph/m043_create_hfid_display_label_in_db.py +164 -0
  38. infrahub/core/migrations/graph/m044_backfill_hfid_display_label_in_db.py +864 -0
  39. infrahub/core/migrations/query/__init__.py +7 -8
  40. infrahub/core/migrations/query/attribute_add.py +8 -6
  41. infrahub/core/migrations/query/attribute_remove.py +134 -0
  42. infrahub/core/migrations/runner.py +54 -0
  43. infrahub/core/migrations/schema/attribute_kind_update.py +9 -3
  44. infrahub/core/migrations/schema/attribute_supports_profile.py +90 -0
  45. infrahub/core/migrations/schema/node_attribute_add.py +30 -2
  46. infrahub/core/migrations/schema/node_attribute_remove.py +13 -109
  47. infrahub/core/migrations/schema/node_kind_update.py +2 -1
  48. infrahub/core/migrations/schema/node_remove.py +2 -1
  49. infrahub/core/migrations/schema/placeholder_dummy.py +3 -2
  50. infrahub/core/migrations/shared.py +62 -14
  51. infrahub/core/models.py +2 -2
  52. infrahub/core/node/__init__.py +42 -12
  53. infrahub/core/node/create.py +46 -63
  54. infrahub/core/node/lock_utils.py +70 -44
  55. infrahub/core/node/resource_manager/ip_address_pool.py +2 -1
  56. infrahub/core/node/resource_manager/ip_prefix_pool.py +2 -1
  57. infrahub/core/node/resource_manager/number_pool.py +2 -1
  58. infrahub/core/query/attribute.py +55 -0
  59. infrahub/core/query/diff.py +61 -16
  60. infrahub/core/query/ipam.py +16 -4
  61. infrahub/core/query/node.py +51 -43
  62. infrahub/core/query/relationship.py +1 -0
  63. infrahub/core/relationship/model.py +10 -5
  64. infrahub/core/schema/__init__.py +56 -0
  65. infrahub/core/schema/attribute_schema.py +4 -0
  66. infrahub/core/schema/definitions/core/check.py +1 -1
  67. infrahub/core/schema/definitions/core/transform.py +1 -1
  68. infrahub/core/schema/definitions/internal.py +2 -2
  69. infrahub/core/schema/generated/attribute_schema.py +2 -2
  70. infrahub/core/schema/manager.py +22 -1
  71. infrahub/core/schema/schema_branch.py +180 -22
  72. infrahub/core/schema/schema_branch_display.py +12 -0
  73. infrahub/core/schema/schema_branch_hfid.py +6 -0
  74. infrahub/core/validators/uniqueness/checker.py +2 -1
  75. infrahub/database/__init__.py +0 -13
  76. infrahub/database/graph.py +21 -0
  77. infrahub/display_labels/tasks.py +13 -7
  78. infrahub/events/branch_action.py +27 -1
  79. infrahub/generators/tasks.py +3 -7
  80. infrahub/git/base.py +4 -1
  81. infrahub/git/integrator.py +1 -1
  82. infrahub/git/models.py +2 -1
  83. infrahub/git/repository.py +22 -5
  84. infrahub/git/tasks.py +66 -10
  85. infrahub/git/utils.py +123 -1
  86. infrahub/graphql/analyzer.py +9 -0
  87. infrahub/graphql/api/endpoints.py +14 -4
  88. infrahub/graphql/manager.py +4 -9
  89. infrahub/graphql/mutations/branch.py +5 -0
  90. infrahub/graphql/mutations/convert_object_type.py +11 -1
  91. infrahub/graphql/mutations/display_label.py +17 -10
  92. infrahub/graphql/mutations/hfid.py +17 -10
  93. infrahub/graphql/mutations/ipam.py +54 -35
  94. infrahub/graphql/mutations/main.py +27 -28
  95. infrahub/graphql/mutations/proposed_change.py +6 -0
  96. infrahub/graphql/schema_sort.py +170 -0
  97. infrahub/graphql/types/branch.py +4 -1
  98. infrahub/graphql/types/enums.py +3 -0
  99. infrahub/hfid/tasks.py +13 -7
  100. infrahub/lock.py +52 -12
  101. infrahub/message_bus/types.py +3 -1
  102. infrahub/permissions/constants.py +2 -0
  103. infrahub/profiles/queries/get_profile_data.py +4 -5
  104. infrahub/proposed_change/tasks.py +66 -23
  105. infrahub/server.py +6 -2
  106. infrahub/services/__init__.py +2 -2
  107. infrahub/services/adapters/http/__init__.py +5 -0
  108. infrahub/services/adapters/workflow/worker.py +14 -3
  109. infrahub/task_manager/event.py +5 -0
  110. infrahub/task_manager/models.py +7 -0
  111. infrahub/task_manager/task.py +73 -0
  112. infrahub/trigger/setup.py +13 -4
  113. infrahub/trigger/tasks.py +3 -0
  114. infrahub/workers/dependencies.py +10 -1
  115. infrahub/workers/infrahub_async.py +10 -2
  116. infrahub/workflows/catalogue.py +8 -0
  117. infrahub/workflows/initialization.py +5 -0
  118. infrahub/workflows/utils.py +2 -1
  119. infrahub_sdk/analyzer.py +1 -1
  120. infrahub_sdk/batch.py +2 -2
  121. infrahub_sdk/branch.py +14 -2
  122. infrahub_sdk/checks.py +1 -1
  123. infrahub_sdk/client.py +15 -14
  124. infrahub_sdk/config.py +29 -2
  125. infrahub_sdk/ctl/branch.py +3 -0
  126. infrahub_sdk/ctl/cli_commands.py +2 -0
  127. infrahub_sdk/ctl/exceptions.py +1 -1
  128. infrahub_sdk/ctl/schema.py +22 -7
  129. infrahub_sdk/ctl/task.py +110 -0
  130. infrahub_sdk/exceptions.py +18 -18
  131. infrahub_sdk/graphql/query.py +2 -2
  132. infrahub_sdk/node/attribute.py +1 -1
  133. infrahub_sdk/node/property.py +1 -1
  134. infrahub_sdk/node/related_node.py +3 -3
  135. infrahub_sdk/node/relationship.py +4 -6
  136. infrahub_sdk/object_store.py +2 -2
  137. infrahub_sdk/operation.py +1 -1
  138. infrahub_sdk/protocols_generator/generator.py +1 -1
  139. infrahub_sdk/pytest_plugin/exceptions.py +9 -9
  140. infrahub_sdk/pytest_plugin/items/base.py +1 -1
  141. infrahub_sdk/pytest_plugin/items/check.py +1 -1
  142. infrahub_sdk/pytest_plugin/items/python_transform.py +1 -1
  143. infrahub_sdk/repository.py +1 -1
  144. infrahub_sdk/schema/__init__.py +33 -5
  145. infrahub_sdk/spec/models.py +7 -0
  146. infrahub_sdk/spec/object.py +41 -102
  147. infrahub_sdk/spec/processors/__init__.py +0 -0
  148. infrahub_sdk/spec/processors/data_processor.py +10 -0
  149. infrahub_sdk/spec/processors/factory.py +34 -0
  150. infrahub_sdk/spec/processors/range_expand_processor.py +56 -0
  151. infrahub_sdk/task/exceptions.py +4 -4
  152. infrahub_sdk/task/manager.py +2 -2
  153. infrahub_sdk/task/models.py +6 -4
  154. infrahub_sdk/timestamp.py +1 -1
  155. infrahub_sdk/transfer/exporter/json.py +1 -1
  156. infrahub_sdk/transfer/importer/json.py +1 -1
  157. infrahub_sdk/transforms.py +1 -1
  158. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/METADATA +4 -2
  159. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/RECORD +168 -152
  160. infrahub_testcontainers/container.py +144 -6
  161. infrahub_testcontainers/docker-compose-cluster.test.yml +5 -0
  162. infrahub_testcontainers/docker-compose.test.yml +5 -0
  163. infrahub_testcontainers/helpers.py +19 -4
  164. infrahub_testcontainers/models.py +8 -6
  165. infrahub_testcontainers/performance_test.py +6 -4
  166. infrahub/core/migrations/graph/m040_profile_attrs_in_db.py +0 -166
  167. infrahub/core/migrations/graph/m041_create_hfid_display_label_in_db.py +0 -97
  168. infrahub/core/migrations/graph/m042_backfill_hfid_display_label_in_db.py +0 -86
  169. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/LICENSE.txt +0 -0
  170. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/WHEEL +0 -0
  171. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/entry_points.txt +0 -0

infrahub/core/diff/query/merge.py
@@ -49,11 +49,18 @@ WITH node_diff_map, is_node_kind_migration, CASE
  WHEN is_node_kind_migration THEN $migrated_kinds_id_map[node_diff_map.uuid]
  ELSE NULL
  END AS node_db_id
+
+ // ------------------------------
+ // find the correct Node if the Node had its kind/inheritance migrated
+ // and there are multiple Nodes with the same UUID
+ // ------------------------------
  CALL (node_diff_map, node_db_id) {
- MATCH (n:Node {uuid: node_diff_map.uuid})
- WHERE node_db_id IS NULL
- OR %(id_func)s(n) = node_db_id
+ MATCH (n:Node {uuid: node_diff_map.uuid})-[n_is_part_of:IS_PART_OF]->(:Root)
+ WHERE node_db_id IS NULL OR %(id_func)s(n) = node_db_id
+ AND n_is_part_of.branch IN [$source_branch, $target_branch]
  RETURN n
+ ORDER BY n_is_part_of.branch_level DESC, n_is_part_of.from DESC, n_is_part_of.status ASC
+ LIMIT 1
  }
  WITH n, node_diff_map, is_node_kind_migration
  CALL (n, node_diff_map, is_node_kind_migration) {

@@ -224,16 +231,28 @@ CALL (n, node_diff_map, is_node_kind_migration) {
  ELSE NULL
  END AS rel_peer_db_id
  // ------------------------------
+ // find the correct relationship peer if the peer had its kind/inheritance migrated
+ // and there are multiple Nodes with the same UUID
+ // ------------------------------
+ CALL (rel_peer_id, rel_peer_db_id) {
+ MATCH (rel_peer:Node {uuid: rel_peer_id})-[target_is_part_of:IS_PART_OF]->(:Root)
+ WHERE (rel_peer_db_id IS NULL OR %(id_func)s(rel_peer) = rel_peer_db_id)
+ AND target_is_part_of.branch IN [$source_branch, $target_branch]
+ RETURN rel_peer
+ ORDER BY target_is_part_of.branch_level DESC, target_is_part_of.from DESC, target_is_part_of.status ASC
+ LIMIT 1
+ }
+ WITH rel_name, related_rel_status, rel_peer
+ // ------------------------------
  // determine the directions of each IS_RELATED
  // ------------------------------
- CALL (n, rel_name, rel_peer_id, rel_peer_db_id, related_rel_status) {
+ CALL (n, rel_name, rel_peer, related_rel_status) {
  MATCH (n)
  -[source_r_rel_1:IS_RELATED]
  -(r:Relationship {name: rel_name})
  -[source_r_rel_2:IS_RELATED]
- -(rel_peer:Node {uuid: rel_peer_id})
- WHERE (rel_peer_db_id IS NULL OR %(id_func)s(rel_peer) = rel_peer_db_id)
- AND source_r_rel_1.branch IN [$source_branch, $target_branch]
+ -(rel_peer)
+ WHERE source_r_rel_1.branch IN [$source_branch, $target_branch]
  AND source_r_rel_2.branch IN [$source_branch, $target_branch]
  AND source_r_rel_1.from <= $at AND source_r_rel_1.to IS NULL
  AND source_r_rel_2.from <= $at AND source_r_rel_2.to IS NULL

@@ -251,37 +270,34 @@ CALL (n, node_diff_map, is_node_kind_migration) {
  source_r_rel_1.hierarchy AS r1_hierarchy,
  source_r_rel_2.hierarchy AS r2_hierarchy
  }
- WITH n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name, rel_peer_id, rel_peer_db_id, related_rel_status
- CALL (n, rel_name, rel_peer_id, rel_peer_db_id, related_rel_status) {
+ WITH n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name, rel_peer, related_rel_status
+ CALL (n, rel_name, rel_peer, related_rel_status) {
  OPTIONAL MATCH (n)
  -[target_r_rel_1:IS_RELATED {branch: $target_branch, status: "active"}]
  -(:Relationship {name: rel_name})
  -[target_r_rel_2:IS_RELATED {branch: $target_branch, status: "active"}]
- -(rel_peer:Node {uuid: rel_peer_id})
+ -(rel_peer)
  WHERE related_rel_status = "deleted"
- AND (rel_peer_db_id IS NULL OR %(id_func)s(rel_peer) = rel_peer_db_id)
  AND target_r_rel_1.from <= $at AND target_r_rel_1.to IS NULL
  AND target_r_rel_2.from <= $at AND target_r_rel_2.to IS NULL
  SET target_r_rel_1.to = $at
  SET target_r_rel_2.to = $at
  }
- WITH n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name, rel_peer_id, rel_peer_db_id, related_rel_status
+ WITH n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name, rel_peer, related_rel_status
  // ------------------------------
  // conditionally create new IS_RELATED relationships on target_branch, if necessary
  // ------------------------------
- CALL (n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name, rel_peer_id, rel_peer_db_id, related_rel_status) {
- MATCH (p:Node {uuid: rel_peer_id})
- WHERE rel_peer_db_id IS NULL OR %(id_func)s(p) = rel_peer_db_id
+ CALL (n, r, r1_dir, r2_dir, r1_hierarchy, r2_hierarchy, rel_name, rel_peer, related_rel_status) {
  OPTIONAL MATCH (n)
  -[r_rel_1:IS_RELATED {branch: $target_branch, status: related_rel_status}]
  -(:Relationship {name: rel_name})
  -[r_rel_2:IS_RELATED {branch: $target_branch, status: related_rel_status}]
- -(p)
+ -(rel_peer)
  WHERE r_rel_1.from <= $at
  AND (r_rel_1.to >= $at OR r_rel_1.to IS NULL)
  AND r_rel_2.from <= $at
  AND (r_rel_2.to >= $at OR r_rel_2.to IS NULL)
- WITH p, r_rel_1, r_rel_2
+ WITH rel_peer, r_rel_1, r_rel_2
  WHERE r_rel_1 IS NULL
  AND r_rel_2 IS NULL
  // ------------------------------

@@ -301,19 +317,19 @@ CALL (n, node_diff_map, is_node_kind_migration) {
  <-[:IS_RELATED {branch: $target_branch, branch_level: $branch_level, from: $at, status: related_rel_status, hierarchy: r1_hierarchy}]
  -(r)
  }
- CALL (r, p, r2_dir, r2_hierarchy, related_rel_status) {
- WITH r, p, r2_dir, r2_hierarchy, related_rel_status
+ CALL (r, rel_peer, r2_dir, r2_hierarchy, related_rel_status) {
+ WITH r, rel_peer, r2_dir, r2_hierarchy, related_rel_status
  WHERE r2_dir = "r"
  CREATE (r)
  -[:IS_RELATED {branch: $target_branch, branch_level: $branch_level, from: $at, status: related_rel_status, hierarchy: r2_hierarchy}]
- ->(p)
+ ->(rel_peer)
  }
- CALL (r, p, r2_dir, r2_hierarchy, related_rel_status) {
- WITH r, p, r2_dir, r2_hierarchy, related_rel_status
+ CALL (r, rel_peer, r2_dir, r2_hierarchy, related_rel_status) {
+ WITH r, rel_peer, r2_dir, r2_hierarchy, related_rel_status
  WHERE r2_dir = "l"
  CREATE (r)
  <-[:IS_RELATED {branch: $target_branch, branch_level: $branch_level, from: $at, status: related_rel_status, hierarchy: r2_hierarchy}]
- -(p)
+ -(rel_peer)
  }
  }
  }

infrahub/core/graph/__init__.py
@@ -1 +1 @@
- GRAPH_VERSION = 42
+ GRAPH_VERSION = 44

infrahub/core/initialization.py
@@ -50,7 +50,7 @@ async def get_root_node(db: InfrahubDatabase, initialize: bool = False) -> Root:
  roots = await Root.get_list(db=db)
  if len(roots) == 0 and not initialize:
  raise DatabaseError(
- "The Database hasn't been initialized for Infrahub, please run 'infrahub db init' or 'infrahub server start' to initialize the database."
+ "The Database hasn't been initialized for Infrahub, please 'infrahub server start' to initialize the database."
  )

  if len(roots) == 0:

@@ -137,7 +137,8 @@ async def add_indexes(db: InfrahubDatabase) -> None:
  await index_manager.add()


- async def initialization(db: InfrahubDatabase, add_database_indexes: bool = False) -> None:
+ async def initialization(db: InfrahubDatabase, add_database_indexes: bool = False) -> bool:
+ """Run initialization and setup, returns a boolean to indicate if it's the initial setup."""
  if config.SETTINGS.database.db_type == config.DatabaseType.MEMGRAPH:
  session = await db.session()
  await session.run(query="SET DATABASE SETTING 'log.level' TO 'INFO'")

@@ -148,6 +149,7 @@ async def initialization(db: InfrahubDatabase, add_database_indexes: bool = Fals
  # Initialize the database and Load the Root node
  # ---------------------------------------------------
  async with lock.registry.initialization():
+ first_time_initialization = len(await Root.get_list(db=db)) == 0
  log.debug("Checking Root Node")
  await initialize_registry(db=db, initialize=True)

@@ -210,6 +212,7 @@ async def initialization(db: InfrahubDatabase, add_database_indexes: bool = Fals
  ip_namespace = await get_default_ipnamespace(db=db)
  if ip_namespace:
  registry.default_ipnamespace = ip_namespace.id
+ return first_time_initialization


  async def create_root_node(db: InfrahubDatabase) -> Root:
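
initialization() now reports whether this call performed the very first setup of the database. A minimal caller sketch, illustrative only and to be run inside an async context; `db` is assumed to be an existing InfrahubDatabase and `log` the module logger:

    first_time = await initialization(db=db, add_database_indexes=True)
    if first_time:
        # hypothetical first-boot-only work, not part of this diff
        log.info("Database was initialized for the first time")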

infrahub/core/migrations/__init__.py
@@ -1,5 +1,6 @@
  from .schema.attribute_kind_update import AttributeKindUpdateMigration
  from .schema.attribute_name_update import AttributeNameUpdateMigration
+ from .schema.attribute_supports_profile import AttributeSupportsProfileUpdateMigration
  from .schema.node_attribute_add import NodeAttributeAddMigration
  from .schema.node_attribute_remove import NodeAttributeRemoveMigration
  from .schema.node_kind_update import NodeKindUpdateMigration

@@ -19,6 +20,8 @@ MIGRATION_MAP: dict[str, type[SchemaMigration] | None] = {
  "attribute.name.update": AttributeNameUpdateMigration,
  "attribute.branch.update": None,
  "attribute.kind.update": AttributeKindUpdateMigration,
+ "attribute.optional.update": AttributeSupportsProfileUpdateMigration,
+ "attribute.read_only.update": AttributeSupportsProfileUpdateMigration,
  "relationship.branch.update": None,
  "relationship.direction.update": None,
  "relationship.identifier.update": PlaceholderDummyMigration,

infrahub/core/migrations/exceptions.py
@@ -0,0 +1,4 @@
+ class MigrationFailureError(Exception):
+ def __init__(self, errors: list[str]) -> None:
+ super().__init__()
+ self.errors = errors
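
The new exception simply carries the list of error messages gathered while running migrations. A hypothetical usage sketch (the error strings below are invented for illustration):

    errors = ["migration 044 failed", "graph version was not updated"]
    try:
        raise MigrationFailureError(errors=errors)
    except MigrationFailureError as exc:
        for message in exc.errors:
            print(f"upgrade error: {message}")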

infrahub/core/migrations/graph/__init__.py
@@ -41,16 +41,17 @@ from .m036_drop_attr_value_index import Migration036
  from .m037_index_attr_vals import Migration037
  from .m038_redo_0000_prefix_fix import Migration038
  from .m039_ipam_reconcile import Migration039
- from .m040_profile_attrs_in_db import Migration040
- from .m041_create_hfid_display_label_in_db import Migration041
- from .m042_backfill_hfid_display_label_in_db import Migration042
+ from .m040_duplicated_attributes import Migration040
+ from .m041_deleted_dup_edges import Migration041
+ from .m042_profile_attrs_in_db import Migration042
+ from .m043_create_hfid_display_label_in_db import Migration043
+ from .m044_backfill_hfid_display_label_in_db import Migration044

  if TYPE_CHECKING:
- from infrahub.core.root import Root
+ from ..shared import MigrationTypes

- from ..shared import ArbitraryMigration, GraphMigration, InternalSchemaMigration

- MIGRATIONS: list[type[GraphMigration | InternalSchemaMigration | ArbitraryMigration]] = [
+ MIGRATIONS: list[type[MigrationTypes]] = [
  Migration001,
  Migration002,
  Migration003,

@@ -93,25 +94,23 @@ MIGRATIONS: list[type[GraphMigration | InternalSchemaMigrat
  Migration040,
  Migration041,
  Migration042,
+ Migration043,
+ Migration044,
  ]


- async def get_graph_migrations(
- root: Root,
- ) -> Sequence[GraphMigration | InternalSchemaMigration | ArbitraryMigration]:
+ async def get_graph_migrations(current_graph_version: int) -> Sequence[MigrationTypes]:
  applicable_migrations = []
  for migration_class in MIGRATIONS:
  migration = migration_class.init()
- if root.graph_version > migration.minimum_version:
+ if current_graph_version > migration.minimum_version:
  continue
  applicable_migrations.append(migration)

  return applicable_migrations


- def get_migration_by_number(
- migration_number: int | str,
- ) -> GraphMigration | InternalSchemaMigration | ArbitraryMigration:
+ def get_migration_by_number(migration_number: int | str) -> MigrationTypes:
  # Convert to string and pad with zeros if needed
  try:
  num = int(migration_number)
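
get_graph_migrations() now takes the current graph version as an integer instead of a Root node: migrations whose minimum_version is below that value are skipped and the rest are returned in order. An illustrative call, to be run inside an async context (the version number here is only an example):

    pending = await get_graph_migrations(current_graph_version=42)
    print(f"{len(pending)} migrations still to apply")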

infrahub/core/migrations/graph/load_schema_branch.py
@@ -0,0 +1,21 @@
+ from infrahub.core import registry
+ from infrahub.core.branch import Branch
+ from infrahub.core.schema import SchemaRoot, internal_schema
+ from infrahub.core.schema.manager import SchemaManager
+ from infrahub.core.schema.schema_branch import SchemaBranch
+ from infrahub.database import InfrahubDatabase
+ from infrahub.exceptions import InitializationError
+
+
+ async def get_or_load_schema_branch(db: InfrahubDatabase, branch: Branch) -> SchemaBranch:
+ try:
+ if registry.schema.has_schema_branch(branch.name):
+ return registry.schema.get_schema_branch(branch.name)
+ except InitializationError:
+ schema_manager = SchemaManager()
+ registry.schema = schema_manager
+ internal_schema_root = SchemaRoot(**internal_schema)
+ registry.schema.register_schema(schema=internal_schema_root)
+ schema_branch = await registry.schema.load_schema_from_db(db=db, branch=branch)
+ registry.schema.set_schema_branch(name=branch.name, schema=schema_branch)
+ return schema_branch
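
This helper returns the schema branch from the registry when one is already loaded, and otherwise bootstraps the registry and loads the schema from the database, so migration code can call it without a full initialization. An illustrative call, inside an async context; `db` and `branch` are assumed to come from the surrounding migration:

    schema_branch = await get_or_load_schema_branch(db=db, branch=branch)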

infrahub/core/migrations/graph/m013_convert_git_password_credential.py
@@ -286,7 +286,7 @@ class Migration013AddInternalStatusData(AttributeAddQuery):
  kwargs.pop("branch", None)

  super().__init__(
- node_kind="CoreGenericRepository",
+ node_kinds=["CoreGenericRepository"],
  attribute_name="internal_status",
  attribute_kind="Dropdown",
  branch_support=BranchSupportType.LOCAL.value,

infrahub/core/migrations/graph/m037_index_attr_vals.py
@@ -3,13 +3,10 @@ from __future__ import annotations
  from dataclasses import dataclass
  from typing import TYPE_CHECKING, Any

- from rich.console import Console
-
  from infrahub.constants.database import IndexType
  from infrahub.core.attribute import MAX_STRING_LENGTH
- from infrahub.core.migrations.shared import MigrationResult
+ from infrahub.core.migrations.shared import MigrationResult, get_migration_console
  from infrahub.core.query import Query, QueryType
- from infrahub.core.timestamp import Timestamp
  from infrahub.database.index import IndexItem
  from infrahub.database.neo4j import IndexManagerNeo4j
  from infrahub.log import get_logger

@@ -467,13 +464,11 @@ class Migration037(ArbitraryMigration):
  return result

  async def execute(self, db: InfrahubDatabase) -> MigrationResult: # noqa: PLR0915
- console = Console()
+ console = get_migration_console()
  result = MigrationResult()

  # find the active schema attributes that have a LARGE_ATTRIBUTE_TYPE kind on all branches
- console.print(
- f"{Timestamp().to_string()} Determining schema attribute types and timestamps on all branches...", end=""
- )
+ console.print("Determining schema attribute types and timestamps on all branches...", end="")
  get_large_attribute_types_query = await GetLargeAttributeTypesQuery.init(db=db)
  await get_large_attribute_types_query.execute(db=db)
  schema_attribute_timeframes = get_large_attribute_types_query.get_large_attribute_type_timeframes()

@@ -481,10 +476,7 @@

  # find which schema attributes are large_types in the default branch, but updated to non-large_type on other branches
  # {(kind, attr_name): SchemaAttributeTimeframe}
- console.print(
- f"{Timestamp().to_string()} Determining which schema attributes have been updated to non-large_type on non-default branches...",
- end="",
- )
+ console.print("Determining schema attribute updates on non-default branches...", end="")
  main_schema_attribute_timeframes_map: dict[tuple[str, str], SchemaAttributeTimeframe] = {}
  for schema_attr_time in schema_attribute_timeframes:
  if schema_attr_time.is_default_branch:

@@ -508,7 +500,7 @@
  console.print("done")

  # drop the index on the AttributeValueNonIndexed vertex, there won't be any at this point anyway
- console.print(f"{Timestamp().to_string()} Dropping index on AttributeValueIndexed vertices...", end="")
+ console.print("Dropping index on AttributeValueIndexed vertices...", end="")
  index_manager = IndexManagerNeo4j(db=db)
  index_manager.init(nodes=[AV_INDEXED_INDEX], rels=[])
  await index_manager.drop()

@@ -516,7 +508,7 @@

  # create the temporary non-indexed attribute value vertices for LARGE_ATTRIBUTE_TYPE attributes
  # start with default branch
- console.print(f"{Timestamp().to_string()} Update non-indexed attribute values with temporary label...", end="")
+ console.print("Creating temporary non-indexed attribute values for large attribute types...", end="")
  large_schema_attribute_timeframes = [
  schema_attr_time for schema_attr_time in schema_attribute_timeframes if schema_attr_time.is_large_type
  ]

@@ -528,10 +520,7 @@
  console.print("done")

  # re-index attribute values on branches where the type was updated to non-large_type
- console.print(
- f"{Timestamp().to_string()} Indexing attribute values on branches where the attribute schema was updated to a non-large_type...",
- end="",
- )
+ console.print("Re-indexing attribute values on branches updated to non-large types...", end="")
  for schema_attr_time in large_type_reverts:
  revert_non_index_on_branch_query = await RevertNonIndexOnBranchQuery.init(
  db=db, schema_attribute_timeframe=schema_attr_time

@@ -540,27 +529,19 @@
  console.print("done")

  # set the AttributeValue vertices to be AttributeValueIndexed
- console.print(
- f"{Timestamp().to_string()} Update all AttributeValue vertices to add the AttributeValueIndexed label...",
- end="",
- )
+ console.print("Adding AttributeValueIndexed label to AttributeValue vertices...", end="")
  set_attribute_value_indexed_query = await SetAttributeValueIndexedQuery.init(db=db)
  await set_attribute_value_indexed_query.execute(db=db)
  console.print("done")

  # set AttributeValueNonIndexed vertices to just AttributeValue
- console.print(
- f"{Timestamp().to_string()} Update all AttributeValueNonIndexed vertices to be AttributeValue (no index)...",
- end="",
- )
+ console.print("Restoring AttributeValue label on AttributeValueNonIndexed vertices...", end="")
  finalize_attribute_value_non_indexed_query = await FinalizeAttributeValueNonIndexedQuery.init(db=db)
  await finalize_attribute_value_non_indexed_query.execute(db=db)
  console.print("done")

  # de-index all attribute values too large to be indexed
- console.print(
- f"{Timestamp().to_string()} De-index any legacy attribute data that is too large to be indexed...", end=""
- )
+ console.print("De-indexing legacy attribute data exceeding index limits...", end="")
  de_index_large_attribute_values_query = await DeIndexLargeAttributeValuesQuery.init(
  db=db, max_value_size=MAX_STRING_LENGTH
  )

@@ -568,7 +549,7 @@
  console.print("done")

  # add the index back to the AttributeValueNonIndexed vertex
- console.print(f"{Timestamp().to_string()} Add the index back to the AttributeValueIndexed label...", end="")
+ console.print("Adding index back to the AttributeValueIndexed label...", end="")
  index_manager = IndexManagerNeo4j(db=db)
  index_manager.init(nodes=[AV_INDEXED_INDEX], rels=[])
  await index_manager.add()

infrahub/core/migrations/graph/m039_ipam_reconcile.py
@@ -4,14 +4,13 @@ import ipaddress
  from dataclasses import dataclass
  from typing import TYPE_CHECKING, Any

- from rich.console import Console
  from rich.progress import Progress

  from infrahub.core.branch.models import Branch
  from infrahub.core.constants import InfrahubKind
  from infrahub.core.initialization import initialization
  from infrahub.core.ipam.reconciler import IpamReconciler
- from infrahub.core.migrations.shared import MigrationResult
+ from infrahub.core.migrations.shared import MigrationResult, get_migration_console
  from infrahub.core.query import Query, QueryType
  from infrahub.lock import initialize_lock
  from infrahub.log import get_logger

@@ -235,13 +234,13 @@ class Migration039(ArbitraryMigration):
  return MigrationResult()

  async def execute(self, db: InfrahubDatabase) -> MigrationResult:
- console = Console()
+ console = get_migration_console()
  result = MigrationResult()
  # load schemas from database into registry
  initialize_lock()
  await initialization(db=db)

- console.print("Identifying IP prefixes/addresses to reconcile...", end="")
+ console.print("Identifying IP prefixes and addresses to reconcile...", end="")
  find_nodes_query = await FindNodesToReconcileQuery.init(db=db)
  await find_nodes_query.execute(db=db)
  console.print("done")

@@ -250,16 +249,17 @@
  # reconciler cannot correctly handle a prefix that is its own parent
  ip_node_details_list = find_nodes_query.get_nodes_to_reconcile()
  uuids_to_check = {ip_node_details.node_uuid for ip_node_details in ip_node_details_list}
- console.print(f"{len(ip_node_details_list)} IP prefixes/addresses will be reconciled")
+ console.log(f"{len(ip_node_details_list)} IP prefixes or addresses will be reconciled.")

- console.print("Deleting any self-parent relationships...", end="")
+ console.print("Deleting self-parent relationships prior to reconciliation...", end="")
  delete_self_parent_relationships_query = await DeleteSelfParentRelationshipsQuery.init(
  db=db, uuids_to_check=list(uuids_to_check)
  )
  await delete_self_parent_relationships_query.execute(db=db)
  console.print("done")

- with Progress() as progress:
+ console.log("Reconciling IP prefixes and addresses across branches...")
+ with Progress(console=console) as progress:
  reconcile_task = progress.add_task("Reconciling IP prefixes/addresses...", total=len(ip_node_details_list))

  for ip_node_details in ip_node_details_list:

@@ -271,4 +271,6 @@
  )
  progress.update(reconcile_task, advance=1)

+ console.log("IP prefix and address reconciliation complete.")
+
  return result

infrahub/core/migrations/graph/m040_duplicated_attributes.py
@@ -0,0 +1,81 @@
+ from __future__ import annotations
+
+ from typing import TYPE_CHECKING, Any, Sequence
+
+ from infrahub.core.migrations.shared import MigrationResult
+ from infrahub.core.query import Query, QueryType
+
+ from ..shared import GraphMigration
+
+ if TYPE_CHECKING:
+ from infrahub.database import InfrahubDatabase
+
+
+ class DeleteDuplicatedAttributesQuery(Query):
+ name: str = "delete_duplicated_attributes"
+ type: QueryType = QueryType.WRITE
+ insert_return: bool = False
+ insert_limit: bool = False
+
+ async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None: # noqa: ARG002
+ query = """
+ // -------------
+ // get all the Nodes linked to multiple Attributes with the same name to drastically reduce the search space
+ // -------------
+ MATCH (n:Node)-[:HAS_ATTRIBUTE]->(attr:Attribute)
+ WITH DISTINCT n, attr
+ WITH n, attr.name AS attr_name, count(*) AS num_attrs
+ WHERE num_attrs > 1
+ // -------------
+ // for each Node-attr_name pair, get the possible duplicate Attributes
+ // -------------
+ MATCH (n)-[:HAS_ATTRIBUTE]->(dup_attr:Attribute {name: attr_name})
+ WITH DISTINCT n, dup_attr
+ // -------------
+ // get the branch(es) for each possible duplicate Attribute
+ // -------------
+ CALL (n, dup_attr) {
+ MATCH (n)-[r:HAS_ATTRIBUTE {status: "active"}]->(dup_attr)
+ WHERE r.to IS NULL
+ AND NOT exists((n)-[:HAS_ATTRIBUTE {status: "deleted", branch: r.branch}]->(dup_attr))
+ RETURN r.branch AS branch
+ }
+ // -------------
+ // get the latest update time for each duplicate Attribute on each branch
+ // -------------
+ CALL (dup_attr, branch) {
+ MATCH (dup_attr)-[r {branch: branch}]-()
+ RETURN max(r.from) AS latest_update
+ }
+ // -------------
+ // order the duplicate Attributes by latest update time
+ // -------------
+ WITH n, dup_attr, branch, latest_update
+ ORDER BY n, branch, dup_attr.name, latest_update DESC
+ // -------------
+ // for any Node-dup_attr_name pairs with multiple duplicate Attributes, keep the Attribute with the latest update
+ // on this branch and delete all the other edges on this branch for this Attribute
+ // -------------
+ WITH n, branch, dup_attr.name AS dup_attr_name, collect(dup_attr) AS dup_attrs_reverse_chronological
+ WHERE size(dup_attrs_reverse_chronological) > 1
+ WITH branch, tail(dup_attrs_reverse_chronological) AS dup_attrs_to_delete
+ UNWIND dup_attrs_to_delete AS dup_attr_to_delete
+ MATCH (dup_attr_to_delete)-[r {branch: branch}]-()
+ DELETE r
+ // -------------
+ // delete any orphaned Attributes
+ // -------------
+ WITH DISTINCT dup_attr_to_delete
+ WHERE NOT exists((dup_attr_to_delete)--())
+ DELETE dup_attr_to_delete
+ """
+ self.add_to_query(query)
+
+
+ class Migration040(GraphMigration):
+ name: str = "040_duplicated_attributes"
+ queries: Sequence[type[Query]] = [DeleteDuplicatedAttributesQuery]
+ minimum_version: int = 39
+
+ async def validate_migration(self, db: InfrahubDatabase) -> MigrationResult: # noqa: ARG002
+ return MigrationResult()
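
A hypothetical driver sketch; in practice the upgrade runner selects and applies this migration via get_graph_migrations(), but the two entry points used below (`init()` and `validate_migration()`) are the ones shown in this diff. Run inside an async context with an InfrahubDatabase `db`:

    migration = Migration040.init()
    result = await migration.validate_migration(db=db)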