infrahub-server 1.5.0b1__py3-none-any.whl → 1.5.1__py3-none-any.whl

This diff compares the contents of two publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their respective public registries.
Files changed (171)
  1. infrahub/api/dependencies.py +4 -13
  2. infrahub/api/internal.py +2 -0
  3. infrahub/api/oauth2.py +13 -19
  4. infrahub/api/oidc.py +15 -21
  5. infrahub/api/schema.py +24 -3
  6. infrahub/api/transformation.py +22 -20
  7. infrahub/artifacts/models.py +2 -1
  8. infrahub/auth.py +137 -3
  9. infrahub/cli/__init__.py +2 -0
  10. infrahub/cli/db.py +158 -155
  11. infrahub/cli/dev.py +118 -0
  12. infrahub/cli/tasks.py +46 -0
  13. infrahub/cli/upgrade.py +56 -9
  14. infrahub/computed_attribute/tasks.py +20 -8
  15. infrahub/core/attribute.py +10 -2
  16. infrahub/core/branch/enums.py +1 -1
  17. infrahub/core/branch/models.py +7 -3
  18. infrahub/core/branch/tasks.py +68 -7
  19. infrahub/core/constants/__init__.py +3 -0
  20. infrahub/core/diff/calculator.py +2 -2
  21. infrahub/core/diff/query/artifact.py +1 -0
  22. infrahub/core/diff/query/delete_query.py +9 -5
  23. infrahub/core/diff/query/field_summary.py +1 -0
  24. infrahub/core/diff/query/merge.py +39 -23
  25. infrahub/core/graph/__init__.py +1 -1
  26. infrahub/core/initialization.py +5 -2
  27. infrahub/core/migrations/__init__.py +3 -0
  28. infrahub/core/migrations/exceptions.py +4 -0
  29. infrahub/core/migrations/graph/__init__.py +12 -13
  30. infrahub/core/migrations/graph/load_schema_branch.py +21 -0
  31. infrahub/core/migrations/graph/m013_convert_git_password_credential.py +1 -1
  32. infrahub/core/migrations/graph/m037_index_attr_vals.py +11 -30
  33. infrahub/core/migrations/graph/m039_ipam_reconcile.py +9 -7
  34. infrahub/core/migrations/graph/m040_duplicated_attributes.py +81 -0
  35. infrahub/core/migrations/graph/m041_deleted_dup_edges.py +149 -0
  36. infrahub/core/migrations/graph/m042_profile_attrs_in_db.py +147 -0
  37. infrahub/core/migrations/graph/m043_create_hfid_display_label_in_db.py +164 -0
  38. infrahub/core/migrations/graph/m044_backfill_hfid_display_label_in_db.py +864 -0
  39. infrahub/core/migrations/query/__init__.py +7 -8
  40. infrahub/core/migrations/query/attribute_add.py +8 -6
  41. infrahub/core/migrations/query/attribute_remove.py +134 -0
  42. infrahub/core/migrations/runner.py +54 -0
  43. infrahub/core/migrations/schema/attribute_kind_update.py +9 -3
  44. infrahub/core/migrations/schema/attribute_supports_profile.py +90 -0
  45. infrahub/core/migrations/schema/node_attribute_add.py +30 -2
  46. infrahub/core/migrations/schema/node_attribute_remove.py +13 -109
  47. infrahub/core/migrations/schema/node_kind_update.py +2 -1
  48. infrahub/core/migrations/schema/node_remove.py +2 -1
  49. infrahub/core/migrations/schema/placeholder_dummy.py +3 -2
  50. infrahub/core/migrations/shared.py +62 -14
  51. infrahub/core/models.py +2 -2
  52. infrahub/core/node/__init__.py +42 -12
  53. infrahub/core/node/create.py +46 -63
  54. infrahub/core/node/lock_utils.py +70 -44
  55. infrahub/core/node/resource_manager/ip_address_pool.py +2 -1
  56. infrahub/core/node/resource_manager/ip_prefix_pool.py +2 -1
  57. infrahub/core/node/resource_manager/number_pool.py +2 -1
  58. infrahub/core/query/attribute.py +55 -0
  59. infrahub/core/query/diff.py +61 -16
  60. infrahub/core/query/ipam.py +16 -4
  61. infrahub/core/query/node.py +51 -43
  62. infrahub/core/query/relationship.py +1 -0
  63. infrahub/core/relationship/model.py +10 -5
  64. infrahub/core/schema/__init__.py +56 -0
  65. infrahub/core/schema/attribute_schema.py +4 -0
  66. infrahub/core/schema/definitions/core/check.py +1 -1
  67. infrahub/core/schema/definitions/core/transform.py +1 -1
  68. infrahub/core/schema/definitions/internal.py +2 -2
  69. infrahub/core/schema/generated/attribute_schema.py +2 -2
  70. infrahub/core/schema/manager.py +22 -1
  71. infrahub/core/schema/schema_branch.py +180 -22
  72. infrahub/core/schema/schema_branch_display.py +12 -0
  73. infrahub/core/schema/schema_branch_hfid.py +6 -0
  74. infrahub/core/validators/uniqueness/checker.py +2 -1
  75. infrahub/database/__init__.py +0 -13
  76. infrahub/database/graph.py +21 -0
  77. infrahub/display_labels/tasks.py +13 -7
  78. infrahub/events/branch_action.py +27 -1
  79. infrahub/generators/tasks.py +3 -7
  80. infrahub/git/base.py +4 -1
  81. infrahub/git/integrator.py +1 -1
  82. infrahub/git/models.py +2 -1
  83. infrahub/git/repository.py +22 -5
  84. infrahub/git/tasks.py +66 -10
  85. infrahub/git/utils.py +123 -1
  86. infrahub/graphql/analyzer.py +9 -0
  87. infrahub/graphql/api/endpoints.py +14 -4
  88. infrahub/graphql/manager.py +4 -9
  89. infrahub/graphql/mutations/branch.py +5 -0
  90. infrahub/graphql/mutations/convert_object_type.py +11 -1
  91. infrahub/graphql/mutations/display_label.py +17 -10
  92. infrahub/graphql/mutations/hfid.py +17 -10
  93. infrahub/graphql/mutations/ipam.py +54 -35
  94. infrahub/graphql/mutations/main.py +27 -28
  95. infrahub/graphql/mutations/proposed_change.py +6 -0
  96. infrahub/graphql/schema_sort.py +170 -0
  97. infrahub/graphql/types/branch.py +4 -1
  98. infrahub/graphql/types/enums.py +3 -0
  99. infrahub/hfid/tasks.py +13 -7
  100. infrahub/lock.py +52 -12
  101. infrahub/message_bus/types.py +3 -1
  102. infrahub/permissions/constants.py +2 -0
  103. infrahub/profiles/queries/get_profile_data.py +4 -5
  104. infrahub/proposed_change/tasks.py +66 -23
  105. infrahub/server.py +6 -2
  106. infrahub/services/__init__.py +2 -2
  107. infrahub/services/adapters/http/__init__.py +5 -0
  108. infrahub/services/adapters/workflow/worker.py +14 -3
  109. infrahub/task_manager/event.py +5 -0
  110. infrahub/task_manager/models.py +7 -0
  111. infrahub/task_manager/task.py +73 -0
  112. infrahub/trigger/setup.py +13 -4
  113. infrahub/trigger/tasks.py +3 -0
  114. infrahub/workers/dependencies.py +10 -1
  115. infrahub/workers/infrahub_async.py +10 -2
  116. infrahub/workflows/catalogue.py +8 -0
  117. infrahub/workflows/initialization.py +5 -0
  118. infrahub/workflows/utils.py +2 -1
  119. infrahub_sdk/analyzer.py +1 -1
  120. infrahub_sdk/batch.py +2 -2
  121. infrahub_sdk/branch.py +14 -2
  122. infrahub_sdk/checks.py +1 -1
  123. infrahub_sdk/client.py +15 -14
  124. infrahub_sdk/config.py +29 -2
  125. infrahub_sdk/ctl/branch.py +3 -0
  126. infrahub_sdk/ctl/cli_commands.py +2 -0
  127. infrahub_sdk/ctl/exceptions.py +1 -1
  128. infrahub_sdk/ctl/schema.py +22 -7
  129. infrahub_sdk/ctl/task.py +110 -0
  130. infrahub_sdk/exceptions.py +18 -18
  131. infrahub_sdk/graphql/query.py +2 -2
  132. infrahub_sdk/node/attribute.py +1 -1
  133. infrahub_sdk/node/property.py +1 -1
  134. infrahub_sdk/node/related_node.py +3 -3
  135. infrahub_sdk/node/relationship.py +4 -6
  136. infrahub_sdk/object_store.py +2 -2
  137. infrahub_sdk/operation.py +1 -1
  138. infrahub_sdk/protocols_generator/generator.py +1 -1
  139. infrahub_sdk/pytest_plugin/exceptions.py +9 -9
  140. infrahub_sdk/pytest_plugin/items/base.py +1 -1
  141. infrahub_sdk/pytest_plugin/items/check.py +1 -1
  142. infrahub_sdk/pytest_plugin/items/python_transform.py +1 -1
  143. infrahub_sdk/repository.py +1 -1
  144. infrahub_sdk/schema/__init__.py +33 -5
  145. infrahub_sdk/spec/models.py +7 -0
  146. infrahub_sdk/spec/object.py +41 -102
  147. infrahub_sdk/spec/processors/__init__.py +0 -0
  148. infrahub_sdk/spec/processors/data_processor.py +10 -0
  149. infrahub_sdk/spec/processors/factory.py +34 -0
  150. infrahub_sdk/spec/processors/range_expand_processor.py +56 -0
  151. infrahub_sdk/task/exceptions.py +4 -4
  152. infrahub_sdk/task/manager.py +2 -2
  153. infrahub_sdk/task/models.py +6 -4
  154. infrahub_sdk/timestamp.py +1 -1
  155. infrahub_sdk/transfer/exporter/json.py +1 -1
  156. infrahub_sdk/transfer/importer/json.py +1 -1
  157. infrahub_sdk/transforms.py +1 -1
  158. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/METADATA +4 -2
  159. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/RECORD +168 -152
  160. infrahub_testcontainers/container.py +144 -6
  161. infrahub_testcontainers/docker-compose-cluster.test.yml +5 -0
  162. infrahub_testcontainers/docker-compose.test.yml +5 -0
  163. infrahub_testcontainers/helpers.py +19 -4
  164. infrahub_testcontainers/models.py +8 -6
  165. infrahub_testcontainers/performance_test.py +6 -4
  166. infrahub/core/migrations/graph/m040_profile_attrs_in_db.py +0 -166
  167. infrahub/core/migrations/graph/m041_create_hfid_display_label_in_db.py +0 -97
  168. infrahub/core/migrations/graph/m042_backfill_hfid_display_label_in_db.py +0 -86
  169. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/LICENSE.txt +0 -0
  170. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/WHEEL +0 -0
  171. {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.1.dist-info}/entry_points.txt +0 -0
infrahub_testcontainers/container.py

@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import os
 import shutil
 import time
@@ -72,7 +74,10 @@ class InfrahubDockerCompose(DockerCompose):
 
     @classmethod
     def init(
-        cls, directory: Path | None = None, version: str | None = None, deployment_type: str | None = None
+        cls,
+        directory: Path | None = None,
+        version: str | None = None,
+        deployment_type: str | None = None,
     ) -> Self:
         if not directory:
             directory = Path.cwd()
@@ -84,7 +89,11 @@ class InfrahubDockerCompose(DockerCompose):
         if version == "local" and infrahub_image_version:
             version = infrahub_image_version
 
-        compose = cls(project_name=cls.generate_project_name(), context=directory, deployment_type=deployment_type)
+        compose = cls(
+            project_name=cls.generate_project_name(),
+            context=directory,
+            deployment_type=deployment_type,
+        )
         compose.create_docker_file(directory=directory)
         compose.create_env_file(directory=directory, version=version)
 
@@ -152,6 +161,8 @@ class InfrahubDockerCompose(DockerCompose):
             "INFRAHUB_TESTING_TASKMGR_BACKGROUND_SVC_REPLICAS": "1",
             "PREFECT_MESSAGING_BROKER": "prefect_redis.messaging",
             "PREFECT_MESSAGING_CACHE": "prefect_redis.messaging",
+            "PREFECT_SERVER_EVENTS_CAUSAL_ORDERING": "prefect_redis.ordering",
+            "PREFECT_SERVER_CONCURRENCY_LEASE_STORAGE": "prefect_redis.lease_storage",
             "PREFECT__SERVER_WEBSERVER_ONLY": "true",
             "PREFECT_API_DATABASE_MIGRATE_ON_START": "false",
             "PREFECT_API_BLOCKS_REGISTER_ON_START": "false",
@@ -237,7 +248,12 @@ class InfrahubDockerCompose(DockerCompose):
             for service_name, service_data in INFRAHUB_SERVICES.items()
         }
 
-    def database_create_backup(self, backup_name: str = "neo4j_database.backup", dest_dir: Path | None = None) -> None:
+    def database_create_backup(
+        self,
+        backup_name: str = "neo4j_database.backup",
+        dest_dir: Path | None = None,
+        compress: bool = False,
+    ) -> None:
         assert self.use_neo4j_enterprise
 
         self.exec_in_container(
@@ -245,7 +261,7 @@ class InfrahubDockerCompose(DockerCompose):
                 "neo4j-admin",
                 "database",
                 "backup",
-                "--compress=false",
+                f"--compress={'true' if compress else 'false'}",
                 "--to-path",
                 str(self.internal_backup_dir),
             ],
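The two hunks above are what make Neo4j backup compression switchable: callers get a new `compress` keyword, and its value is forwarded to `neo4j-admin database backup` as `--compress=true|false`, with the previous behaviour (`--compress=false`) remaining the default. A minimal usage sketch, assuming an already-running stack on Neo4j enterprise (the directory is invented for illustration):

```python
from pathlib import Path

# Sketch only: assumes the compose stack is already up and uses Neo4j
# enterprise, as required by the assert inside database_create_backup.
compose = InfrahubDockerCompose.init(directory=Path("/tmp/infrahub-test"))  # hypothetical path

# New in 1.5.1: opt into a compressed backup archive.
compose.database_create_backup(backup_name="neo4j_database.backup", compress=True)
```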
@@ -279,7 +295,14 @@ class InfrahubDockerCompose(DockerCompose):
             self.start_container(service_name=service_name)
 
         self.exec_in_container(
-            command=["cypher-shell", "-u", "neo4j", "-p", "admin", "STOP DATABASE neo4j;"],
+            command=[
+                "cypher-shell",
+                "-u",
+                "neo4j",
+                "-p",
+                "admin",
+                "STOP DATABASE neo4j;",
+            ],
             service_name=service_name,
         )
 
@@ -364,7 +387,14 @@ class InfrahubDockerCompose(DockerCompose):
         time.sleep(10)
 
         self.exec_in_container(
-            command=["cypher-shell", "-u", "neo4j", "-p", "admin", "DROP DATABASE neo4j;"],
+            command=[
+                "cypher-shell",
+                "-u",
+                "neo4j",
+                "-p",
+                "admin",
+                "DROP DATABASE neo4j;",
+            ],
             service_name=service_name,
         )
 
@@ -513,3 +543,111 @@ class InfrahubDockerCompose(DockerCompose):
         )
         self.start()
         print("Database restored successfully")
+
+    def task_manager_create_backup(self, backup_name: str = "prefect.dump", dest_dir: Path | None = None) -> Path:
+        """Create a backup of the task manager PostgreSQL database using ``pg_dump``.
+
+        Args:
+            backup_name: Name of the archive file to create. Defaults to ``prefect.dump``.
+            dest_dir: Optional host directory where the backup should be copied after it is
+                produced. When omitted, the backup remains in ``external_backup_dir``.
+
+        Returns:
+            Path to the backup archive on the host filesystem.
+
+        Raises:
+            FileNotFoundError: If the pg_dump command completes but no archive is produced.
+        """
+
+        service_name = "task-manager-db"
+
+        try:
+            self.get_container(service_name=service_name)
+        except ContainerIsNotRunning:
+            self.start_container(service_name=service_name)
+
+        self.external_backup_dir.mkdir(parents=True, exist_ok=True)
+
+        internal_backup_path = self.internal_backup_dir / backup_name
+        dump_command = [
+            "pg_dump",
+            "--format=custom",
+            "--blobs",
+            "--no-owner",
+            "--no-privileges",
+            "--dbname=postgresql://postgres:postgres@localhost:5432/prefect",
+            f"--file={internal_backup_path}",
+        ]
+        self.exec_in_container(command=dump_command, service_name=service_name)
+
+        source_path = self.external_backup_dir / backup_name
+        if not source_path.exists():
+            raise FileNotFoundError(f"Backup file {source_path} was not created")
+
+        final_path = source_path
+        if dest_dir:
+            dest_dir.mkdir(parents=True, exist_ok=True)
+            if dest_dir.resolve() != self.external_backup_dir.resolve():
+                final_path = dest_dir / backup_name
+                shutil.copy(source_path, final_path)
+
+        return final_path
+
+    def task_manager_restore_backup(self, backup_file: Path) -> None:
+        """Restore the task manager PostgreSQL database from a ``pg_restore`` archive.
+
+        Args:
+            backup_file: Path to the backup archive on the host filesystem.
+
+        Raises:
+            FileNotFoundError: If the provided backup archive does not exist.
+        """
+
+        if not backup_file.exists():
+            raise FileNotFoundError(f"Backup file {backup_file} does not exist")
+
+        service_name = "task-manager-db"
+
+        try:
+            self.get_container(service_name=service_name)
+        except ContainerIsNotRunning:
+            self.start_container(service_name=service_name)
+
+        self.external_backup_dir.mkdir(parents=True, exist_ok=True)
+        target_path = self.external_backup_dir / backup_file.name
+        shutil.copy(backup_file, target_path)
+
+        admin_dsn = "postgresql://postgres:postgres@localhost:5432/postgres"
+        prefect_dsn = "postgresql://postgres:postgres@localhost:5432/prefect"
+        internal_backup_path = self.internal_backup_dir / backup_file.name
+
+        terminate_sessions_command = [
+            "psql",
+            f"--dbname={admin_dsn}",
+            "--command",
+            "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = 'prefect';",
+        ]
+        drop_database_command = [
+            "psql",
+            f"--dbname={admin_dsn}",
+            "--command",
+            "DROP DATABASE IF EXISTS prefect WITH (FORCE);",
+        ]
+        create_database_command = [
+            "psql",
+            f"--dbname={admin_dsn}",
+            "--command",
+            "CREATE DATABASE prefect OWNER postgres;",
+        ]
+        restore_command = [
+            "pg_restore",
+            "--no-owner",
+            "--role=postgres",
+            f"--dbname={prefect_dsn}",
+            str(internal_backup_path),
+        ]
+
+        self.exec_in_container(command=terminate_sessions_command, service_name=service_name)
+        self.exec_in_container(command=drop_database_command, service_name=service_name)
+        self.exec_in_container(command=create_database_command, service_name=service_name)
+        self.exec_in_container(command=restore_command, service_name=service_name)
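This block is the bulk of container.py's +144: the task manager (Prefect) PostgreSQL database gains the same backup/restore story Neo4j already had, implemented with `pg_dump`/`pg_restore` executed inside the `task-manager-db` service. A hedged round-trip sketch, assuming a started `InfrahubDockerCompose` stack named `compose` (the destination directory is invented):

```python
from pathlib import Path

# Take a snapshot of the Prefect database; the archive lands on the host.
archive = compose.task_manager_create_backup(
    backup_name="prefect.dump",
    dest_dir=Path("/tmp/prefect-backups"),  # hypothetical host directory
)

# Later: drop and recreate the prefect database, then restore the snapshot.
compose.task_manager_restore_backup(backup_file=archive)
```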

infrahub_testcontainers/docker-compose-cluster.test.yml

@@ -184,6 +184,8 @@ services:
 
       PREFECT_MESSAGING_BROKER:
       PREFECT_MESSAGING_CACHE:
+      PREFECT_SERVER_EVENTS_CAUSAL_ORDERING:
+      PREFECT_SERVER_CONCURRENCY_LEASE_STORAGE:
       PREFECT__SERVER_WEBSERVER_ONLY:
       PREFECT_API_DATABASE_MIGRATE_ON_START:
       PREFECT_API_BLOCKS_REGISTER_ON_START:
@@ -225,6 +227,8 @@ services:
       INFRAHUB_CACHE_ADDRESS: ${INFRAHUB_TESTING_CACHE_ADDRESS}
       PREFECT_MESSAGING_BROKER: prefect_redis.messaging
       PREFECT_MESSAGING_CACHE: prefect_redis.messaging
+      PREFECT_SERVER_EVENTS_CAUSAL_ORDERING: prefect_redis.ordering
+      PREFECT_SERVER_CONCURRENCY_LEASE_STORAGE: prefect_redis.lease_storage
       PREFECT_REDIS_MESSAGING_HOST: "${INFRAHUB_TESTING_CACHE_ADDRESS:-cache}"
       PREFECT_REDIS_MESSAGING_DB: "1"
       PREFECT_REDIS_MESSAGING_CONSUMER_MIN_IDLE_TIME: "30"
@@ -244,6 +248,7 @@ services:
       - POSTGRES_DB=prefect
     volumes:
       - workflow_db:/var/lib/postgresql/data
+      - "./${INFRAHUB_TESTING_LOCAL_DB_BACKUP_DIRECTORY}:${INFRAHUB_TESTING_INTERNAL_DB_BACKUP_DIRECTORY}"
     healthcheck:
       test: ["CMD-SHELL", "pg_isready"]
       interval: 10s

infrahub_testcontainers/docker-compose.test.yml

@@ -95,6 +95,8 @@ services:
 
       PREFECT_MESSAGING_BROKER:
       PREFECT_MESSAGING_CACHE:
+      PREFECT_SERVER_EVENTS_CAUSAL_ORDERING:
+      PREFECT_SERVER_CONCURRENCY_LEASE_STORAGE:
       PREFECT__SERVER_WEBSERVER_ONLY:
       PREFECT_API_DATABASE_MIGRATE_ON_START:
       PREFECT_API_BLOCKS_REGISTER_ON_START:
@@ -136,6 +138,8 @@ services:
       INFRAHUB_CACHE_ADDRESS: ${INFRAHUB_TESTING_CACHE_ADDRESS}
       PREFECT_MESSAGING_BROKER: prefect_redis.messaging
       PREFECT_MESSAGING_CACHE: prefect_redis.messaging
+      PREFECT_SERVER_EVENTS_CAUSAL_ORDERING: prefect_redis.ordering
+      PREFECT_SERVER_CONCURRENCY_LEASE_STORAGE: prefect_redis.lease_storage
       PREFECT_REDIS_MESSAGING_HOST: "${INFRAHUB_TESTING_CACHE_ADDRESS:-cache}"
       PREFECT_REDIS_MESSAGING_DB: "1"
       PREFECT_REDIS_MESSAGING_CONSUMER_MIN_IDLE_TIME: "30"
@@ -155,6 +159,7 @@ services:
       - POSTGRES_DB=prefect
     volumes:
       - workflow_db:/var/lib/postgresql/data
+      - "./${INFRAHUB_TESTING_LOCAL_DB_BACKUP_DIRECTORY}:${INFRAHUB_TESTING_INTERNAL_DB_BACKUP_DIRECTORY}"
     healthcheck:
       test: ["CMD-SHELL", "pg_isready"]
       interval: 10s
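Both compose files receive the same two changes: the Prefect server is pointed at its Redis-backed causal-ordering and concurrency-lease plugins, and `task-manager-db` gains a bind mount so archives written by `pg_dump` to the internal backup directory surface on the host. For reference, a sketch of the two Prefect settings expressed as plain environment variables (values copied verbatim from the diff; applying them this way outside compose is my assumption, not something the package does):

```python
import os

# The values the compose files now forward into the Prefect server container.
os.environ["PREFECT_SERVER_EVENTS_CAUSAL_ORDERING"] = "prefect_redis.ordering"
os.environ["PREFECT_SERVER_CONCURRENCY_LEASE_STORAGE"] = "prefect_redis.lease_storage"
```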

infrahub_testcontainers/helpers.py

@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import os
 import subprocess  # noqa: S404
 import uuid
@@ -24,13 +26,21 @@ class TestInfrahubDocker:
         env["INFRAHUB_API_TOKEN"] = PROJECT_ENV_VARIABLES["INFRAHUB_TESTING_INITIAL_ADMIN_TOKEN"]
         env["INFRAHUB_MAX_CONCURRENT_EXECUTION"] = "1"
         result = subprocess.run(  # noqa: S602
-            f"infrahubctl run {script}", shell=True, capture_output=True, text=True, env=env, check=False
+            f"infrahubctl run {script}",
+            shell=True,
+            capture_output=True,
+            text=True,
+            env=env,
+            check=False,
         )
         return result.stdout
 
     @staticmethod
     def execute_command(
-        command: str, address: str, concurrent_execution: int = 10, pagination_size: int = 50
+        command: str,
+        address: str,
+        concurrent_execution: int = 10,
+        pagination_size: int = 50,
     ) -> subprocess.CompletedProcess[str]:
         env = os.environ.copy()
         env["INFRAHUB_ADDRESS"] = address
@@ -79,7 +89,9 @@ class TestInfrahubDocker:
         deployment_type: str | None,
     ) -> InfrahubDockerCompose:
         return InfrahubDockerCompose.init(
-            directory=tmp_directory, version=infrahub_version, deployment_type=deployment_type
+            directory=tmp_directory,
+            version=infrahub_version,
+            deployment_type=deployment_type,
         )
 
     @pytest.fixture(scope="class")
@@ -90,7 +102,10 @@ class TestInfrahubDocker:
             tests_failed_during_class = request.session.testsfailed - tests_failed_before_class
            if tests_failed_during_class > 0:
                 stdout, stderr = infrahub_compose.get_logs("infrahub-server", "task-worker")
-                warnings.warn(f"Container logs:\nStdout:\n{stdout}\nStderr:\n{stderr}", stacklevel=2)
+                warnings.warn(
+                    f"Container logs:\nStdout:\n{stdout}\nStderr:\n{stderr}",
+                    stacklevel=2,
+                )
             infrahub_compose.stop()
 
         request.addfinalizer(cleanup)
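The helpers.py changes are purely cosmetic reflows; no signature changes. For orientation, a hedged sketch of how a test class built on these helpers might look (the class name, command, and address are invented; the fixtures in helpers.py normally supply real values):

```python
# Sketch only: TestInfrahubDocker comes from infrahub_testcontainers.helpers.
class TestMyScenario(TestInfrahubDocker):
    def test_cli_roundtrip(self) -> None:
        # execute_command is a staticmethod, so calling it via self is fine.
        result = self.execute_command(
            command="infrahubctl branch list",  # hypothetical CLI invocation
            address="http://localhost:8000",    # hypothetical server address
            concurrent_execution=1,
            pagination_size=50,
        )
        assert result.returncode == 0
```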

infrahub_testcontainers/models.py

@@ -1,6 +1,8 @@
-from datetime import UTC, datetime
+from __future__ import annotations
+
+from datetime import datetime, timezone
 from enum import Enum
-from typing import Any
+from typing import Any, Union
 
 from pydantic import BaseModel, Field
 
@@ -15,24 +17,24 @@ class ContextUnit(str, Enum):
 class MeasurementDefinition(BaseModel):
     name: str
     description: str
-    dimensions: list[str] = Field(default_factory=dict)
+    dimensions: list[str] = Field(default_factory=list)
     unit: ContextUnit
 
 
 class InfrahubResultContext(BaseModel):
     name: str
-    value: int | float | str
+    value: Union[int, float, str]
     unit: ContextUnit
 
 
 class InfrahubActiveMeasurementItem(BaseModel):
     definition: MeasurementDefinition
-    start_time: datetime = Field(default_factory=lambda: datetime.now(UTC))
+    start_time: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
     context: dict[str, Any] = Field(default_factory=dict)
 
 
 class InfrahubMeasurementItem(BaseModel):
     name: str
-    value: int | float | str
+    value: Union[int, float, str]
     unit: ContextUnit
     context: dict[str, Any] = Field(default_factory=dict)
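The models.py edits read as compatibility fixes: `datetime.UTC` only exists on Python 3.11+, and `int | float | str` unions inside Pydantic fields need a newer interpreter at runtime, so the module falls back to `timezone.utc` and `typing.Union`; the `dimensions` field also swaps an incorrect `default_factory=dict` for `list`. A quick standalone check of the timezone equivalence being relied on (my framing, not code from the package):

```python
from datetime import datetime, timezone

# datetime.UTC was added in Python 3.11; timezone.utc is the long-standing,
# portable spelling of the same tzinfo singleton.
now = datetime.now(timezone.utc)
assert now.tzinfo is timezone.utc
```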

infrahub_testcontainers/performance_test.py

@@ -1,6 +1,8 @@
+from __future__ import annotations
+
 import hashlib
 import json
-from datetime import UTC, datetime
+from datetime import datetime, timezone
 from types import TracebackType
 from typing import Any
 
@@ -35,7 +37,7 @@ class InfrahubPerformanceTest:
         self.env_vars = {}
         self.project_name = ""
         self.test_info = {}
-        self.start_time = datetime.now(UTC)
+        self.start_time = datetime.now(timezone.utc)
         self.end_time: datetime | None = None
         self.results_url = results_url
         self.scraper_endpoint = ""
@@ -57,7 +59,7 @@ class InfrahubPerformanceTest:
 
     def finalize(self, session: pytest.Session) -> None:
         if self.initialized:
-            self.end_time = datetime.now(UTC)
+            self.end_time = datetime.now(timezone.utc)
             self.extract_test_session_information(session)
             self.send_results()
 
@@ -129,7 +131,7 @@ class InfrahubPerformanceTest:
         if not exc_type and self.active_measurements:
             self.add_measurement(
                 definition=self.active_measurements.definition,
-                value=(datetime.now(UTC) - self.active_measurements.start_time).total_seconds() * 1000,
+                value=(datetime.now(timezone.utc) - self.active_measurements.start_time).total_seconds() * 1000,
                 context=self.active_measurements.context,
             )
 
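performance_test.py applies the same `UTC` to `timezone.utc` substitution everywhere the class timestamps itself, including the context-manager exit path that records elapsed wall time in milliseconds. A worked sketch of that arithmetic, standalone and with an invented start time:

```python
from datetime import datetime, timedelta, timezone

# Mirror of the measurement computation: elapsed time in milliseconds.
start_time = datetime.now(timezone.utc) - timedelta(seconds=1.5)  # pretend the work took ~1.5s
elapsed_ms = (datetime.now(timezone.utc) - start_time).total_seconds() * 1000
assert 1400 <= elapsed_ms <= 1700
```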

infrahub/core/migrations/graph/m040_profile_attrs_in_db.py (deleted)

@@ -1,166 +0,0 @@
-from __future__ import annotations
-
-from collections import defaultdict
-from typing import TYPE_CHECKING, Any
-
-from rich.console import Console
-from rich.progress import Progress
-
-from infrahub.core.branch.models import Branch
-from infrahub.core.initialization import initialization
-from infrahub.core.manager import NodeManager
-from infrahub.core.migrations.shared import MigrationResult
-from infrahub.core.query import Query, QueryType
-from infrahub.core.timestamp import Timestamp
-from infrahub.lock import initialize_lock
-from infrahub.log import get_logger
-from infrahub.profiles.node_applier import NodeProfilesApplier
-
-from ..shared import ArbitraryMigration
-
-if TYPE_CHECKING:
-    from infrahub.core.node import Node
-    from infrahub.database import InfrahubDatabase
-
-log = get_logger()
-
-
-class GetProfilesByBranchQuery(Query):
-    """
-    Get CoreProfile UUIDs by which branches they have attribute updates on
-    """
-
-    name = "get_profiles_by_branch"
-    type = QueryType.READ
-    insert_return = False
-
-    async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:  # noqa: ARG002
-        query = """
-        MATCH (profile:CoreProfile)-[:HAS_ATTRIBUTE]->(attr:Attribute)-[e:HAS_VALUE]->(:AttributeValue)
-        WITH DISTINCT profile.uuid AS profile_uuid, e.branch AS branch
-        RETURN profile_uuid, collect(branch) AS branches
-        """
-        self.add_to_query(query)
-        self.return_labels = ["profile_uuid", "branches"]
-
-    def get_profile_ids_by_branch(self) -> dict[str, set[str]]:
-        """Get dictionary of branch names to set of updated profile UUIDs"""
-        profiles_by_branch = defaultdict(set)
-        for result in self.get_results():
-            profile_uuid = result.get_as_type("profile_uuid", str)
-            branches = result.get_as_type("branches", list[str])
-            for branch in branches:
-                profiles_by_branch[branch].add(profile_uuid)
-        return profiles_by_branch
-
-
-class GetNodesWithProfileUpdatesByBranchQuery(Query):
-    """
-    Get Node UUIDs by which branches they have updated profiles on
-    """
-
-    name = "get_nodes_with_profile_updates_by_branch"
-    type = QueryType.READ
-    insert_return = False
-
-    async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:  # noqa: ARG002
-        query = """
-        MATCH (node:Node)-[e1:IS_RELATED]->(:Relationship {name: "node__profile"})
-        WHERE NOT node:CoreProfile
-        WITH DISTINCT node.uuid AS node_uuid, e1.branch AS branch
-        RETURN node_uuid, collect(branch) AS branches
-        """
-        self.add_to_query(query)
-        self.return_labels = ["node_uuid", "branches"]
-
-    def get_node_ids_by_branch(self) -> dict[str, set[str]]:
-        """Get dictionary of branch names to set of updated node UUIDs"""
-        nodes_by_branch = defaultdict(set)
-        for result in self.get_results():
-            node_uuid = result.get_as_type("node_uuid", str)
-            branches = result.get_as_type("branches", list[str])
-            for branch in branches:
-                nodes_by_branch[branch].add(node_uuid)
-        return nodes_by_branch
-
-
-class Migration040(ArbitraryMigration):
-    """
-    Save profile attribute values on each node using the profile in the database
-    For any profile that has updates on a given branch (including default branch)
-    - run NodeProfilesApplier.apply_profiles on each node related to the profile on that branch
-    For any node that has an updated relationship to a profile on a given branch
-    - run NodeProfilesApplier.apply_profiles on the node on that branch
-    """
-
-    name: str = "040_profile_attrs_in_db"
-    minimum_version: int = 39
-
-    def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(*args, **kwargs)
-        self._appliers_by_branch: dict[str, NodeProfilesApplier] = {}
-
-    async def _get_profile_applier(self, db: InfrahubDatabase, branch_name: str) -> NodeProfilesApplier:
-        if branch_name not in self._appliers_by_branch:
-            branch = await Branch.get_by_name(db=db, name=branch_name)
-            self._appliers_by_branch[branch_name] = NodeProfilesApplier(db=db, branch=branch)
-        return self._appliers_by_branch[branch_name]
-
-    async def validate_migration(self, db: InfrahubDatabase) -> MigrationResult:  # noqa: ARG002
-        return MigrationResult()
-
-    async def execute(self, db: InfrahubDatabase) -> MigrationResult:
-        console = Console()
-        result = MigrationResult()
-        # load schemas from database into registry
-        initialize_lock()
-        await initialization(db=db)
-
-        console.print("Gathering profiles for each branch...", end="")
-        get_profiles_by_branch_query = await GetProfilesByBranchQuery.init(db=db)
-        await get_profiles_by_branch_query.execute(db=db)
-        profiles_ids_by_branch = get_profiles_by_branch_query.get_profile_ids_by_branch()
-
-        profiles_by_branch: dict[str, list[Node]] = {}
-        for branch_name, profile_ids in profiles_ids_by_branch.items():
-            profiles_map = await NodeManager.get_many(db=db, branch=branch_name, ids=list(profile_ids))
-            profiles_by_branch[branch_name] = list(profiles_map.values())
-        console.print("done")
-
-        node_ids_to_update_by_branch: dict[str, set[str]] = defaultdict(set)
-        total_size = sum(len(profiles) for profiles in profiles_by_branch.values())
-        with Progress() as progress:
-            gather_nodes_task = progress.add_task(
-                "Gathering affected objects for each profile on each branch...", total=total_size
-            )
-
-            for branch_name, profiles in profiles_by_branch.items():
-                for profile in profiles:
-                    node_relationship_manager = profile.get_relationship("related_nodes")
-                    node_peers = await node_relationship_manager.get_db_peers(db=db)
-                    node_ids_to_update_by_branch[branch_name].update({str(peer.peer_id) for peer in node_peers})
-                    progress.update(gather_nodes_task, advance=1)
-
-        console.print("Identifying nodes with profile updates by branch...", end="")
-        get_nodes_with_profile_updates_by_branch_query = await GetNodesWithProfileUpdatesByBranchQuery.init(db=db)
-        await get_nodes_with_profile_updates_by_branch_query.execute(db=db)
-        nodes_ids_by_branch = get_nodes_with_profile_updates_by_branch_query.get_node_ids_by_branch()
-        for branch_name, node_ids in nodes_ids_by_branch.items():
-            node_ids_to_update_by_branch[branch_name].update(node_ids)
-        console.print("done")
-
-        right_now = Timestamp()
-        total_size = sum(len(node_ids) for node_ids in node_ids_to_update_by_branch.values())
-        with Progress() as progress:
-            apply_task = progress.add_task("Applying profiles to nodes...", total=total_size)
-            for branch_name, node_ids in node_ids_to_update_by_branch.items():
-                applier = await self._get_profile_applier(db=db, branch_name=branch_name)
-                for node_id in node_ids:
-                    node = await NodeManager.get_one(db=db, branch=branch_name, id=node_id, at=right_now)
-                    if node:
-                        updated_field_names = await applier.apply_profiles(node=node)
-                        if updated_field_names:
-                            await node.save(db=db, fields=updated_field_names, at=right_now)
-                    progress.update(apply_task, advance=1)
-
-        return result

infrahub/core/migrations/graph/m041_create_hfid_display_label_in_db.py (deleted)

@@ -1,97 +0,0 @@
-from __future__ import annotations
-
-from typing import TYPE_CHECKING, Any
-
-from rich.progress import Progress
-from typing_extensions import Self
-
-from infrahub.core import registry
-from infrahub.core.constants import SchemaPathType
-from infrahub.core.initialization import initialization
-from infrahub.core.migrations.schema.node_attribute_add import NodeAttributeAddMigration
-from infrahub.core.migrations.shared import InternalSchemaMigration, MigrationResult
-from infrahub.core.path import SchemaPath
-from infrahub.lock import initialize_lock
-
-if TYPE_CHECKING:
-    from infrahub.database import InfrahubDatabase
-
-
-class Migration041(InternalSchemaMigration):
-    name: str = "041_create_hfid_display_label_in_db"
-    minimum_version: int = 40
-
-    @classmethod
-    def init(cls, **kwargs: Any) -> Self:
-        internal_schema = cls.get_internal_schema()
-        schema_node = internal_schema.get_node(name="SchemaNode")
-        schema_generic = internal_schema.get_node(name="SchemaGeneric")
-
-        cls.migrations = [
-            # HFID is not needed, it was introduced at graph v8
-            NodeAttributeAddMigration(
-                new_node_schema=schema_node,
-                previous_node_schema=schema_node,
-                schema_path=SchemaPath(
-                    schema_kind="SchemaNode", path_type=SchemaPathType.ATTRIBUTE, field_name="display_label"
-                ),
-            ),
-            NodeAttributeAddMigration(
-                new_node_schema=schema_generic,
-                previous_node_schema=schema_generic,
-                schema_path=SchemaPath(
-                    schema_kind="SchemaGeneric", path_type=SchemaPathType.ATTRIBUTE, field_name="display_label"
-                ),
-            ),
-        ]
-        return cls(migrations=cls.migrations, **kwargs)  # type: ignore[arg-type]
-
-    async def execute(self, db: InfrahubDatabase) -> MigrationResult:
-        result = MigrationResult()
-
-        # load schemas from database into registry
-        initialize_lock()
-        await initialization(db=db)
-
-        default_branch = registry.get_branch_from_registry()
-        schema_branch = await registry.schema.load_schema_from_db(db=db, branch=default_branch)
-
-        migrations = list(self.migrations)
-
-        for node_schema_kind in schema_branch.node_names:
-            schema = schema_branch.get(name=node_schema_kind, duplicate=False)
-            migrations.extend(
-                [
-                    NodeAttributeAddMigration(
-                        new_node_schema=schema,
-                        previous_node_schema=schema,
-                        schema_path=SchemaPath(
-                            schema_kind=schema.kind, path_type=SchemaPathType.ATTRIBUTE, field_name="human_friendly_id"
-                        ),
-                    ),
-                    NodeAttributeAddMigration(
-                        new_node_schema=schema,
-                        previous_node_schema=schema,
-                        schema_path=SchemaPath(
-                            schema_kind=schema.kind, path_type=SchemaPathType.ATTRIBUTE, field_name="display_label"
-                        ),
-                    ),
-                ]
-            )
-
-        with Progress() as progress:
-            update_task = progress.add_task("Adding HFID and display label to nodes", total=len(migrations))
-
-            for migration in migrations:
-                try:
-                    execution_result = await migration.execute(db=db, branch=default_branch)
-                    result.errors.extend(execution_result.errors)
-                    progress.update(update_task, advance=1)
-                except Exception as exc:
-                    result.errors.append(str(exc))
-                    return result
-
-        return result
-
-    async def validate_migration(self, db: InfrahubDatabase) -> MigrationResult:  # noqa: ARG002
-        return MigrationResult()