infrahub-server 1.3.0a0__py3-none-any.whl → 1.3.0b2__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
Files changed (123)
  1. infrahub/actions/tasks.py +4 -11
  2. infrahub/branch/__init__.py +0 -0
  3. infrahub/branch/tasks.py +29 -0
  4. infrahub/branch/triggers.py +22 -0
  5. infrahub/cli/db.py +2 -2
  6. infrahub/computed_attribute/gather.py +3 -1
  7. infrahub/computed_attribute/tasks.py +23 -29
  8. infrahub/core/attribute.py +3 -3
  9. infrahub/core/constants/__init__.py +10 -0
  10. infrahub/core/constants/database.py +1 -0
  11. infrahub/core/constants/infrahubkind.py +2 -0
  12. infrahub/core/convert_object_type/conversion.py +1 -1
  13. infrahub/core/diff/query/save.py +67 -40
  14. infrahub/core/diff/query/time_range_query.py +0 -1
  15. infrahub/core/graph/__init__.py +1 -1
  16. infrahub/core/migrations/graph/__init__.py +6 -0
  17. infrahub/core/migrations/graph/m013_convert_git_password_credential.py +0 -2
  18. infrahub/core/migrations/graph/m029_duplicates_cleanup.py +662 -0
  19. infrahub/core/migrations/graph/m030_illegal_edges.py +82 -0
  20. infrahub/core/migrations/query/attribute_add.py +13 -9
  21. infrahub/core/migrations/query/attribute_rename.py +2 -4
  22. infrahub/core/migrations/query/delete_element_in_schema.py +16 -11
  23. infrahub/core/migrations/query/node_duplicate.py +16 -15
  24. infrahub/core/migrations/query/relationship_duplicate.py +16 -12
  25. infrahub/core/migrations/schema/node_attribute_remove.py +1 -2
  26. infrahub/core/migrations/schema/node_remove.py +16 -14
  27. infrahub/core/node/__init__.py +74 -14
  28. infrahub/core/node/base.py +1 -1
  29. infrahub/core/node/resource_manager/ip_address_pool.py +6 -2
  30. infrahub/core/node/resource_manager/ip_prefix_pool.py +6 -2
  31. infrahub/core/node/resource_manager/number_pool.py +31 -5
  32. infrahub/core/node/standard.py +6 -1
  33. infrahub/core/path.py +1 -1
  34. infrahub/core/protocols.py +10 -0
  35. infrahub/core/query/node.py +1 -1
  36. infrahub/core/query/relationship.py +4 -6
  37. infrahub/core/query/standard_node.py +19 -5
  38. infrahub/core/relationship/constraints/peer_relatives.py +72 -0
  39. infrahub/core/relationship/model.py +1 -1
  40. infrahub/core/schema/attribute_parameters.py +129 -5
  41. infrahub/core/schema/attribute_schema.py +62 -14
  42. infrahub/core/schema/basenode_schema.py +2 -2
  43. infrahub/core/schema/definitions/core/__init__.py +16 -2
  44. infrahub/core/schema/definitions/core/group.py +45 -0
  45. infrahub/core/schema/definitions/core/resource_pool.py +29 -0
  46. infrahub/core/schema/definitions/internal.py +25 -4
  47. infrahub/core/schema/generated/attribute_schema.py +12 -5
  48. infrahub/core/schema/generated/relationship_schema.py +6 -1
  49. infrahub/core/schema/manager.py +7 -2
  50. infrahub/core/schema/schema_branch.py +69 -5
  51. infrahub/core/validators/__init__.py +8 -0
  52. infrahub/core/validators/attribute/choices.py +0 -1
  53. infrahub/core/validators/attribute/enum.py +0 -1
  54. infrahub/core/validators/attribute/kind.py +0 -1
  55. infrahub/core/validators/attribute/length.py +0 -1
  56. infrahub/core/validators/attribute/min_max.py +118 -0
  57. infrahub/core/validators/attribute/number_pool.py +106 -0
  58. infrahub/core/validators/attribute/optional.py +0 -2
  59. infrahub/core/validators/attribute/regex.py +0 -1
  60. infrahub/core/validators/enum.py +5 -0
  61. infrahub/core/validators/tasks.py +1 -1
  62. infrahub/database/__init__.py +16 -4
  63. infrahub/database/validation.py +100 -0
  64. infrahub/dependencies/builder/constraint/grouped/node_runner.py +2 -0
  65. infrahub/dependencies/builder/constraint/relationship_manager/peer_relatives.py +8 -0
  66. infrahub/dependencies/builder/diff/deserializer.py +1 -1
  67. infrahub/dependencies/registry.py +2 -0
  68. infrahub/events/models.py +1 -1
  69. infrahub/git/base.py +5 -3
  70. infrahub/git/integrator.py +102 -3
  71. infrahub/graphql/mutations/main.py +1 -1
  72. infrahub/graphql/mutations/resource_manager.py +54 -6
  73. infrahub/graphql/queries/resource_manager.py +7 -1
  74. infrahub/graphql/queries/task.py +10 -0
  75. infrahub/graphql/resolvers/many_relationship.py +1 -1
  76. infrahub/graphql/resolvers/resolver.py +2 -2
  77. infrahub/graphql/resolvers/single_relationship.py +1 -1
  78. infrahub/graphql/types/task_log.py +3 -2
  79. infrahub/menu/menu.py +8 -7
  80. infrahub/message_bus/operations/refresh/registry.py +3 -3
  81. infrahub/patch/queries/delete_duplicated_edges.py +40 -29
  82. infrahub/pools/number.py +5 -3
  83. infrahub/pools/registration.py +22 -0
  84. infrahub/pools/tasks.py +56 -0
  85. infrahub/schema/__init__.py +0 -0
  86. infrahub/schema/tasks.py +27 -0
  87. infrahub/schema/triggers.py +23 -0
  88. infrahub/task_manager/task.py +44 -4
  89. infrahub/trigger/catalogue.py +4 -0
  90. infrahub/trigger/models.py +5 -4
  91. infrahub/trigger/setup.py +26 -2
  92. infrahub/trigger/tasks.py +1 -1
  93. infrahub/types.py +6 -0
  94. infrahub/webhook/tasks.py +6 -9
  95. infrahub/workflows/catalogue.py +27 -1
  96. infrahub_sdk/client.py +43 -10
  97. infrahub_sdk/node/__init__.py +39 -0
  98. infrahub_sdk/node/attribute.py +122 -0
  99. infrahub_sdk/node/constants.py +21 -0
  100. infrahub_sdk/{node.py → node/node.py} +50 -749
  101. infrahub_sdk/node/parsers.py +15 -0
  102. infrahub_sdk/node/property.py +24 -0
  103. infrahub_sdk/node/related_node.py +266 -0
  104. infrahub_sdk/node/relationship.py +302 -0
  105. infrahub_sdk/protocols.py +112 -0
  106. infrahub_sdk/protocols_base.py +34 -2
  107. infrahub_sdk/query_groups.py +13 -2
  108. infrahub_sdk/schema/main.py +1 -0
  109. infrahub_sdk/schema/repository.py +16 -0
  110. infrahub_sdk/spec/object.py +1 -1
  111. infrahub_sdk/store.py +1 -1
  112. infrahub_sdk/testing/schemas/car_person.py +1 -0
  113. {infrahub_server-1.3.0a0.dist-info → infrahub_server-1.3.0b2.dist-info}/METADATA +3 -3
  114. {infrahub_server-1.3.0a0.dist-info → infrahub_server-1.3.0b2.dist-info}/RECORD +122 -100
  115. {infrahub_server-1.3.0a0.dist-info → infrahub_server-1.3.0b2.dist-info}/WHEEL +1 -1
  116. infrahub_testcontainers/container.py +239 -64
  117. infrahub_testcontainers/docker-compose-cluster.test.yml +321 -0
  118. infrahub_testcontainers/docker-compose.test.yml +1 -0
  119. infrahub_testcontainers/helpers.py +15 -1
  120. infrahub_testcontainers/plugin.py +9 -0
  121. infrahub/patch/queries/consolidate_duplicated_nodes.py +0 -106
  122. {infrahub_server-1.3.0a0.dist-info → infrahub_server-1.3.0b2.dist-info}/LICENSE.txt +0 -0
  123. {infrahub_server-1.3.0a0.dist-info → infrahub_server-1.3.0b2.dist-info}/entry_points.txt +0 -0
infrahub_testcontainers/container.py

@@ -66,9 +66,12 @@ PROJECT_ENV_VARIABLES: dict[str, str] = {
 class InfrahubDockerCompose(DockerCompose):
     project_name: str | None = None
     env_vars: dict[str, str] = field(default_factory=dict)
+    deployment_type: str | None = None
 
     @classmethod
-    def init(cls, directory: Path | None = None, version: str | None = None) -> Self:
+    def init(
+        cls, directory: Path | None = None, version: str | None = None, deployment_type: str | None = None
+    ) -> Self:
         if not directory:
             directory = Path.cwd()
 
@@ -79,7 +82,7 @@ class InfrahubDockerCompose(DockerCompose):
         if version == "local" and infrahub_image_version:
            version = infrahub_image_version
 
-        compose = cls(project_name=cls.generate_project_name(), context=directory)
+        compose = cls(project_name=cls.generate_project_name(), context=directory, deployment_type=deployment_type)
         compose.create_docker_file(directory=directory)
         compose.create_env_file(directory=directory, version=version)
 
@@ -111,7 +114,10 @@ class InfrahubDockerCompose(DockerCompose):
 
     def create_docker_file(self, directory: Path) -> Path:
         current_directory = Path(__file__).resolve().parent
-        compose_file = current_directory / "docker-compose.test.yml"
+        compose_file_name = (
+            "docker-compose-cluster.test.yml" if self.deployment_type == "cluster" else "docker-compose.test.yml"
+        )
+        compose_file = current_directory / compose_file_name
 
         test_compose_file = directory / "docker-compose.yml"
         test_compose_file.write_bytes(compose_file.read_bytes())
@@ -160,7 +166,7 @@ class InfrahubDockerCompose(DockerCompose):
         cmd.extend(self.services)
         self._run_command(cmd=cmd)
 
-    def start_container(self, service_name: str) -> None:
+    def start_container(self, service_name: str | list[str]) -> None:
         """
         Starts a specific service of the docker compose environment.
 
@@ -170,7 +176,11 @@ class InfrahubDockerCompose(DockerCompose):
 
         # pull means running a separate command before starting
         if self.pull:
-            pull_cmd = [*base_cmd, "pull", service_name]
+            pull_cmd = [*base_cmd, "pull"]
+            if isinstance(service_name, list):
+                pull_cmd.extend(service_name)
+            else:
+                pull_cmd.append(service_name)
             self._run_command(cmd=pull_cmd)
 
         up_cmd = [*base_cmd, "up"]
@@ -185,7 +195,10 @@ class InfrahubDockerCompose(DockerCompose):
         # we run in detached mode instead of blocking
         up_cmd.append("--detach")
 
-        up_cmd.append(service_name)
+        if isinstance(service_name, list):
+            up_cmd.extend(service_name)
+        else:
+            up_cmd.append(service_name)
         self._run_command(cmd=up_cmd)
 
         # TODO would be good to the support for project_name upstream
@@ -233,7 +246,7 @@ class InfrahubDockerCompose(DockerCompose):
             dest_dir / backup_name,
         )
 
-    def database_restore_backup(self, backup_file: Path) -> None:
+    def database_restore_backup(self, backup_file: Path) -> None:  # noqa: PLR0915
         assert self.use_neo4j_enterprise
 
         shutil.copy(
@@ -242,52 +255,35 @@ class InfrahubDockerCompose(DockerCompose):
         )
         service_name = "database"
 
-        # Ensure the database container is running otherwise start it
-        try:
-            self.get_container(service_name=service_name)
-        except ContainerIsNotRunning:
-            self.start_container(service_name=service_name)
-
-        self.exec_in_container(
-            command=["cypher-shell", "-u", "neo4j", "-p", "admin", "STOP DATABASE neo4j;"],
-            service_name=service_name,
-        )
+        if self.deployment_type != "cluster":  # noqa: PLR1702
+            try:
+                self.get_container(service_name=service_name)
+            except ContainerIsNotRunning:
+                self.start_container(service_name=service_name)
 
-        self.exec_in_container(
-            command=[
-                "neo4j-admin",
-                "database",
-                "restore",
-                "--overwrite-destination",
-                "--from-path",
-                str(self.internal_backup_dir / backup_file.name),
-            ],
-            service_name=service_name,
-        )
+            self.exec_in_container(
+                command=["cypher-shell", "-u", "neo4j", "-p", "admin", "STOP DATABASE neo4j;"],
+                service_name=service_name,
+            )
 
-        self.exec_in_container(
-            command=["chown", "-R", "neo4j:neo4j", "/data"],
-            service_name=service_name,
-        )
+            self.exec_in_container(
+                command=[
+                    "neo4j-admin",
+                    "database",
+                    "restore",
+                    "--overwrite-destination",
+                    "--from-path",
+                    str(self.internal_backup_dir / backup_file.name),
+                ],
+                service_name=service_name,
+            )
 
-        (restore_output, _, _) = self.exec_in_container(
-            command=[
-                "cypher-shell",
-                "--format",
-                "plain",
-                "-d",
-                "system",
-                "-u",
-                "neo4j",
-                "-p",
-                "admin",
-                "START DATABASE neo4j;",
-            ],
-            service_name=service_name,
-        )
+            self.exec_in_container(
+                command=["chown", "-R", "neo4j:neo4j", "/data"],
+                service_name=service_name,
+            )
 
-        for _ in range(3):
-            (stdout, _, _) = self.exec_in_container(
+            (restore_output, _, _) = self.exec_in_container(
                 command=[
                     "cypher-shell",
                     "--format",
@@ -298,26 +294,205 @@ class InfrahubDockerCompose(DockerCompose):
                     "neo4j",
                     "-p",
                     "admin",
-                    "SHOW DATABASES WHERE name = 'neo4j' AND currentStatus = 'online';",
+                    "START DATABASE neo4j;",
                 ],
                 service_name=service_name,
             )
-            if stdout:
-                break
-            time.sleep(5)
+
+            for _ in range(3):
+                (stdout, _, _) = self.exec_in_container(
+                    command=[
+                        "cypher-shell",
+                        "--format",
+                        "plain",
+                        "-d",
+                        "system",
+                        "-u",
+                        "neo4j",
+                        "-p",
+                        "admin",
+                        "SHOW DATABASES WHERE name = 'neo4j' AND currentStatus = 'online';",
+                    ],
+                    service_name=service_name,
+                )
+                if stdout:
+                    break
+                time.sleep(5)
+            else:
+                (debug_logs, _, _) = self.exec_in_container(
+                    command=["cat", "logs/debug.log"],
+                    service_name=service_name,
+                )
+                raise Exception(f"Failed to restore database:\n{restore_output}\nDebug logs:\n{debug_logs}")
+
+            old_services = self.services
+            self.services = ["infrahub-server", "task-worker"]
+            self.stop(down=False)
+            try:
+                self.start()
+            except Exception as exc:
+                stdout, stderr = self.get_logs()
+                raise Exception(f"Failed to start docker compose:\nStdout:\n{stdout}\nStderr:\n{stderr}") from exc
+            self.services = old_services
         else:
-            (debug_logs, _, _) = self.exec_in_container(
-                command=["cat", "logs/debug.log"],
+            print("Cluster mode detected")
+            try:
+                self.get_container(service_name=service_name)
+                self.get_container(service_name="database-core2")
+                self.get_container(service_name="database-core3")
+            except ContainerIsNotRunning:
+                self.start_container("database", "database-core2", "database-core3")
+
+            # Waiting for cluster to stabilize...
+            time.sleep(10)
+
+            self.exec_in_container(
+                command=["cypher-shell", "-u", "neo4j", "-p", "admin", "DROP DATABASE neo4j;"],
+                service_name=service_name,
+            )
+
+            self.exec_in_container(
+                command=["rm", "-rf", "/data/databases/neo4j"],
+                service_name=service_name,
+            )
+            self.exec_in_container(
+                command=["rm", "-rf", "/data/transactions/neo4j"],
+                service_name=service_name,
+            )
+
+            self.exec_in_container(
+                command=[
+                    "neo4j-admin",
+                    "database",
+                    "restore",
+                    "--from-path",
+                    str(self.internal_backup_dir / backup_file.name),
+                    "neo4j",
+                ],
                 service_name=service_name,
             )
-            raise Exception(f"Failed to restore database:\n{restore_output}\nDebug logs:\n{debug_logs}")
 
-        old_services = self.services
-        self.services = ["infrahub-server", "task-worker"]
-        self.stop(down=False)
-        try:
+            cmd = self.compose_command_property[:]
+            cmd += ["restart", "database"]
+            self._run_command(cmd=cmd)
+
+            main_node = service_name
+            cluster_nodes = ["database", "database-core2", "database-core3"]
+
+            for attempt in range(3):
+                try:
+                    (stdout, _, _) = self.exec_in_container(
+                        command=[
+                            "cypher-shell",
+                            "--format",
+                            "plain",
+                            "-d",
+                            "system",
+                            "-u",
+                            "neo4j",
+                            "-p",
+                            "admin",
+                            "SHOW DATABASES YIELD name, address, currentStatus WHERE name = 'system' RETURN address, currentStatus",
+                        ],
+                        service_name=main_node,
+                    )
+                except Exception:
+                    time.sleep(10)
+                    continue
+
+                raw_output = stdout
+                nodes_status = dict.fromkeys(cluster_nodes, False)
+                online_count = 0
+                total_entries = 0
+
+                try:
+                    for line_raw in stdout.splitlines():
+                        line = line_raw.strip()
+                        if not line or line.startswith("address"):
+                            continue
+
+                        total_entries += 1
+                        if "online" in line:
+                            online_count += 1
+                            for node in cluster_nodes:
+                                node_pattern = f'"{node}:'
+                                if node_pattern in line:
+                                    nodes_status[node] = True
+                                    break
+                    if all(nodes_status.values()) and online_count == len(cluster_nodes):
+                        break
+                except Exception as e:
+                    print(f"Error parsing database status on attempt {attempt + 1}: {e}")
+
+                print(f"Waiting for all nodes to be online. Current status: {nodes_status}")
+                time.sleep(5)
+            else:
+                debug_logs = {}
+                for node in cluster_nodes:
+                    try:
+                        (logs, _, _) = self.exec_in_container(
+                            command=["cat", "logs/debug.log"],
+                            service_name=node,
+                        )
+                        debug_logs[node] = logs
+                    except Exception as e:
+                        debug_logs[node] = f"Could not retrieve logs: {str(e)}"
+
+                debug_info = f"Raw output from SHOW DATABASES command:\n{raw_output}\n\n"
+                debug_info += f"Final node status: {nodes_status}\n\n"
+
+                status_str = ", ".join(
+                    [f"{node}: {'online' if status else 'offline'}" for node, status in nodes_status.items()]
+                )
+                logs_str = debug_info + "\n\n".join(
+                    [f"--- {node} logs ---\n{logs}" for node, logs in debug_logs.items()]
+                )
+
+                raise Exception(
+                    f"Failed to restore database cluster. Node status: {status_str}\nDebug logs:\n{logs_str}"
+                )
+
+            server_id = None
+            try:
+                stdout, _, _ = self.exec_in_container(
+                    command=[
+                        "cypher-shell",
+                        "--format",
+                        "plain",
+                        "-d",
+                        "system",
+                        "-u",
+                        "neo4j",
+                        "-p",
+                        "admin",
+                        'SHOW SERVERS YIELD name, address WHERE address = "database:7687" RETURN name;',
+                    ],
+                    service_name=service_name,
+                )
+
+                lines = stdout.splitlines()
+                for line_raw in lines:
+                    line = line_raw.strip()
+                    if not line or line == "name" or line.startswith("+"):
+                        continue
+                    server_id = line.strip('"')
+                    break
+            except Exception as e:
+                print(f"Error retrieving server ID with direct query: {e}")
+
+            if server_id:
+                self.exec_in_container(
+                    command=[
+                        "cypher-shell",
+                        "-d",
+                        "system",
+                        "-u",
+                        "neo4j",
+                        "-p",
+                        "admin",
+                        f"CREATE DATABASE neo4j TOPOLOGY 3 PRIMARIES OPTIONS {{ existingData: 'use', existingDataSeedInstance: '{server_id}' }};",
+                    ],
+                    service_name=service_name,
+                )
             self.start()
-        except Exception as exc:
-            stdout, stderr = self.get_logs()
-            raise Exception(f"Failed to start docker compose:\nStdout:\n{stdout}\nStderr:\n{stderr}") from exc
-        self.services = old_services
+            print("Database restored successfully")
infrahub_testcontainers/docker-compose-cluster.test.yml (new file)

@@ -0,0 +1,321 @@
+---
+# yamllint disable rule:line-length
+# The following environment variables are part of the Infrahub configuration options.
+# For detailed information on these configuration options, please refer to the Infrahub documentation:
+# https://docs.infrahub.app/reference/configuration
+x-neo4j-config-common: &neo4j-config-common
+  NEO4J_AUTH: neo4j/admin
+  NEO4J_dbms_security_procedures_unrestricted: apoc.*
+  NEO4J_dbms_security_auth__minimum__password__length: 4
+  NEO4J_ACCEPT_LICENSE_AGREEMENT: 'yes'
+  NEO4J_server_backup_enabled: true
+  NEO4J_metrics_prometheus_enabled: true
+  NEO4J_server_metrics_filter: '*'
+  NEO4J_server_cluster_system__database__mode: PRIMARY
+  NEO4J_initial_server_mode__constraint: PRIMARY
+  NEO4J_dbms_cluster_discovery_endpoints: database:5000,database-core2:5000,database-core3:5000
+  NEO4J_initial_dbms_default__primaries__count: 3
+  NEO4J_dbms_memory_heap_initial__size: ${INFRAHUB_TESTING_DB_HEAP_INITIAL_SIZE}
+  NEO4J_dbms_memory_heap_max__size: ${INFRAHUB_TESTING_DB_HEAP_MAX_SIZE}
+  NEO4J_server_memory_pagecache_size: ${INFRAHUB_TESTING_DB_PAGECACHE_SIZE}
+
+
+services:
+  message-queue:
+    image: ${MESSAGE_QUEUE_DOCKER_IMAGE:-rabbitmq:3.13.7-management}
+    restart: unless-stopped
+    environment:
+      RABBITMQ_DEFAULT_USER: infrahub
+      RABBITMQ_DEFAULT_PASS: infrahub
+    healthcheck:
+      test: rabbitmq-diagnostics -q check_port_connectivity
+      interval: 5s
+      timeout: 30s
+      retries: 10
+      start_period: 3s
+    ports:
+      - ${INFRAHUB_TESTING_MESSAGE_QUEUE_PORT:-0}:15692
+
+  cache:
+    image: ${CACHE_DOCKER_IMAGE:-redis:7.2.4}
+    restart: unless-stopped
+    healthcheck:
+      test: ["CMD-SHELL", "redis-cli ping | grep PONG"]
+      interval: 5s
+      timeout: 5s
+      retries: 3
+
+  infrahub-server-lb:
+    image: haproxy:3.1-alpine
+    volumes:
+      - ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg
+    depends_on:
+      infrahub-server:
+        condition: service_started
+    healthcheck:
+      test: wget -O /dev/null http://127.0.0.1:8000/api/config || exit 1
+      interval: 5s
+      timeout: 5s
+      retries: 20
+      start_period: 10s
+    ports:
+      - ${INFRAHUB_TESTING_SERVER_PORT:-0}:8000
+
+  database:
+    deploy:
+      resources:
+        limits:
+          cpus: ${INFRAHUB_TESTING_DB_CPU_LIMIT}
+          memory: ${INFRAHUB_TESTING_DB_MEMORY_LIMIT}
+    image: "${DATABASE_DOCKER_IMAGE:-neo4j:5.20.0-enterprise}"
+    restart: unless-stopped
+    environment:
+      <<: *neo4j-config-common
+      NEO4J_metrics_prometheus_endpoint: 0.0.0.0:2004
+      NEO4J_server_backup_listen__address: 0.0.0.0:6362
+      NEO4J_server_discovery_advertised__address: database:5000
+      NEO4J_server_cluster_advertised__address: database:6000
+      NEO4J_server_cluster_raft_advertised__address: database:7000
+      NEO4J_server_bolt_advertised__address: database:7687
+      NEO4J_server_http_advertised__address: database:7474
+      NEO4J_server_https_advertised__address: database:7473
+    volumes:
+      - "database_data:/data"
+      - "database_logs:/logs"
+      - "./${INFRAHUB_TESTING_LOCAL_DB_BACKUP_DIRECTORY}:${INFRAHUB_TESTING_INTERNAL_DB_BACKUP_DIRECTORY}"
+    healthcheck:
+      test: wget http://localhost:7474 || exit 1
+      interval: 2s
+      timeout: 10s
+      retries: 20
+      start_period: 3s
+    ports:
+      - ${INFRAHUB_TESTING_DATABASE_PORT:-0}:6362
+      - ${INFRAHUB_TESTING_DATABASE_UI_PORT:-0}:7474
+
+  database-core2:
+    deploy:
+      resources:
+        limits:
+          cpus: ${INFRAHUB_TESTING_DB_CPU_LIMIT}
+          memory: ${INFRAHUB_TESTING_DB_MEMORY_LIMIT}
+    image: "${DATABASE_DOCKER_IMAGE:-neo4j:5.20.0-enterprise}"
+    environment:
+      <<: *neo4j-config-common
+      NEO4J_metrics_prometheus_endpoint: 0.0.0.0:2005
+      NEO4J_server_backup_listen__address: 0.0.0.0:6363
+      NEO4J_server_discovery_advertised__address: database-core2:5000
+      NEO4J_server_cluster_advertised__address: database-core2:6000
+      NEO4J_server_cluster_raft_advertised__address: database-core2:7000
+      NEO4J_server_bolt_advertised__address: database-core2:7687
+      NEO4J_server_http_advertised__address: database-core2:7474
+      NEO4J_server_https_advertised__address: database-core2:7473
+    volumes:
+      - "./plugins:/plugins"
+      - "database_data_core2:/data"
+      - "database_logs_core2:/logs"
+    healthcheck:
+      test: wget http://localhost:7474 || exit 1
+      interval: 5s
+      timeout: 10s
+      retries: 40
+      start_period: 30s
+    labels:
+      infrahub_role: "database"
+      com.github.run_id: "${GITHUB_RUN_ID:-unknown}"
+      com.github.job: "${JOB_NAME:-unknown}"
+    ports:
+      - "${INFRAHUB_TESTING_DATABASE_PORT:-0}:6363"
+
+  database-core3:
+    deploy:
+      resources:
+        limits:
+          cpus: ${INFRAHUB_TESTING_DB_CPU_LIMIT}
+          memory: ${INFRAHUB_TESTING_DB_MEMORY_LIMIT}
+    image: "${DATABASE_DOCKER_IMAGE:-neo4j:5.20.0-enterprise}"
+    environment:
+      <<: *neo4j-config-common
+      NEO4J_metrics_prometheus_endpoint: 0.0.0.0:2006
+      NEO4J_server_backup_listen__address: 0.0.0.0:6364
+      NEO4J_server_discovery_advertised__address: database-core3:5000
+      NEO4J_server_cluster_advertised__address: database-core3:6000
+      NEO4J_server_cluster_raft_advertised__address: database-core3:7000
+      NEO4J_server_bolt_advertised__address: database-core3:7687
+      NEO4J_server_http_advertised__address: database-core3:7474
+      NEO4J_server_https_advertised__address: database-core3:7473
+    volumes:
+      - "./plugins:/plugins"
+      - "database_data_core3:/data"
+      - "database_logs_core3:/logs"
+    healthcheck:
+      test: wget http://localhost:7474 || exit 1
+      interval: 5s
+      timeout: 10s
+      retries: 40
+      start_period: 30s
+    labels:
+      infrahub_role: "database"
+      com.github.run_id: "${GITHUB_RUN_ID:-unknown}"
+      com.github.job: "${JOB_NAME:-unknown}"
+    ports:
+      - "${INFRAHUB_TESTING_DATABASE_PORT:-0}:6364"
+
+  task-manager:
+    image: "${INFRAHUB_TESTING_DOCKER_IMAGE}:${INFRAHUB_TESTING_IMAGE_VERSION}"
+    command: uvicorn --host 0.0.0.0 --port 4200 --factory infrahub.prefect_server.app:create_infrahub_prefect
+    depends_on:
+      task-manager-db:
+        condition: service_healthy
+    environment:
+      PREFECT_UI_ENABLED: "${INFRAHUB_TESTING_PREFECT_UI_ENABLED}" # enabling UI requires permissions, run container as root to enable UI
+      PREFECT_API_DATABASE_CONNECTION_URL: postgresql+asyncpg://postgres:postgres@task-manager-db:5432/prefect
+    healthcheck:
+      test: /usr/local/bin/httpx http://localhost:4200/api/health || exit 1
+      interval: 5s
+      timeout: 5s
+      retries: 20
+      start_period: 10s
+    ports:
+      - ${INFRAHUB_TESTING_TASK_MANAGER_PORT:-0}:4200
+
+  task-manager-db:
+    image: "${POSTGRES_DOCKER_IMAGE:-postgres:16-alpine}"
+    environment:
+      - POSTGRES_USER=postgres
+      - POSTGRES_PASSWORD=postgres
+      - POSTGRES_DB=prefect
+    volumes:
+      - workflow_db:/var/lib/postgresql/data
+    healthcheck:
+      test: ["CMD-SHELL", "pg_isready"]
+      interval: 10s
+      timeout: 5s
+      retries: 5
+
+  infrahub-server:
+    deploy:
+      mode: replicated
+      replicas: ${INFRAHUB_TESTING_API_SERVER_COUNT}
+    image: "${INFRAHUB_TESTING_DOCKER_IMAGE}:${INFRAHUB_TESTING_IMAGE_VERSION}"
+    command: ${INFRAHUB_TESTING_DOCKER_ENTRYPOINT}
+    environment:
+      INFRAHUB_PRODUCTION: ${INFRAHUB_TESTING_PRODUCTION}
+      INFRAHUB_LOG_LEVEL: ${INFRAHUB_TESTING_LOG_LEVEL:-INFO}
+      INFRAHUB_BROKER_ADDRESS: ${INFRAHUB_TESTING_BROKER_ADDRESS}
+      INFRAHUB_CACHE_ADDRESS: ${INFRAHUB_TESTING_CACHE_ADDRESS}
+      INFRAHUB_DB_ADDRESS: ${INFRAHUB_TESTING_DB_ADDRESS}
+      INFRAHUB_DB_PROTOCOL: ${INFRAHUB_TESTING_DB_PROTOCOL:-neo4j}
+      INFRAHUB_WORKFLOW_ADDRESS: ${INFRAHUB_TESTING_WORKFLOW_ADDRESS}
+      INFRAHUB_WORKFLOW_DEFAULT_WORKER_TYPE: ${INFRAHUB_TESTING_WORKFLOW_DEFAULT_WORKER_TYPE}
+      INFRAHUB_INITIAL_ADMIN_TOKEN: ${INFRAHUB_TESTING_INITIAL_ADMIN_TOKEN}
+      INFRAHUB_INITIAL_AGENT_TOKEN: ${INFRAHUB_TESTING_INITIAL_AGENT_TOKEN}
+      INFRAHUB_SECURITY_SECRET_KEY: ${INFRAHUB_TESTING_SECURITY_SECRET_KEY}
+      PREFECT_API_URL: ${INFRAHUB_TESTING_PREFECT_API}
+      # Tracing
+      INFRAHUB_TRACE_ENABLE: ${INFRAHUB_TRACE_ENABLE:-false}
+      INFRAHUB_TRACE_EXPORTER_ENDPOINT:
+      INFRAHUB_TRACE_EXPORTER_PROTOCOL: ${INFRAHUB_TRACE_EXPORTER_PROTOCOL:-grpc}
+      INFRAHUB_TRACE_EXPORTER_TYPE: ${INFRAHUB_TRACE_EXPORTER_TYPE:-console}
+      INFRAHUB_TRACE_INSECURE: ${INFRAHUB_TRACE_INSECURE:-true}
+      OTEL_RESOURCE_ATTRIBUTES:
+    depends_on:
+      database:
+        condition: service_healthy
+      database-core2:
+        condition: service_healthy
+      database-core3:
+        condition: service_healthy
+      message-queue:
+        condition: service_healthy
+      cache:
+        condition: service_healthy
+      task-manager:
+        condition: service_healthy
+    volumes:
+      - "storage_data:/opt/infrahub/storage"
+    tty: true
+    healthcheck:
+      test: curl -s -f -o /dev/null http://localhost:8000/api/config || exit 1
+      interval: 5s
+      timeout: 5s
+      retries: 20
+      start_period: 10s
+
+  task-worker:
+    deploy:
+      mode: replicated
+      replicas: ${INFRAHUB_TESTING_TASK_WORKER_COUNT}
+    image: "${INFRAHUB_TESTING_DOCKER_IMAGE}:${INFRAHUB_TESTING_IMAGE_VERSION}"
+    command: prefect worker start --type ${INFRAHUB_TESTING_WORKFLOW_DEFAULT_WORKER_TYPE} --pool infrahub-worker --with-healthcheck
+    environment:
+      INFRAHUB_PRODUCTION: ${INFRAHUB_TESTING_PRODUCTION}
+      INFRAHUB_LOG_LEVEL: ${INFRAHUB_TESTING_LOG_LEVEL}
+      INFRAHUB_GIT_REPOSITORIES_DIRECTORY: ${INFRAHUB_TESTING_GIT_REPOSITORIES_DIRECTORY}
+      INFRAHUB_API_TOKEN: ${INFRAHUB_TESTING_INITIAL_AGENT_TOKEN}
+      INFRAHUB_SECURITY_SECRET_KEY: ${INFRAHUB_TESTING_SECURITY_SECRET_KEY}
+      INFRAHUB_ADDRESS: ${INFRAHUB_TESTING_ADDRESS}
+      INFRAHUB_INTERNAL_ADDRESS: ${INFRAHUB_TESTING_INTERNAL_ADDRESS}
+      INFRAHUB_BROKER_ADDRESS: ${INFRAHUB_TESTING_BROKER_ADDRESS}
+      INFRAHUB_CACHE_ADDRESS: ${INFRAHUB_TESTING_CACHE_ADDRESS}
+      INFRAHUB_DB_ADDRESS: ${INFRAHUB_TESTING_DB_ADDRESS:-database}
+      INFRAHUB_DB_PROTOCOL: ${INFRAHUB_TESTING_DB_PROTOCOL:-neo4j}
+      INFRAHUB_WORKFLOW_ADDRESS: ${INFRAHUB_TESTING_WORKFLOW_ADDRESS}
+      INFRAHUB_TIMEOUT: ${INFRAHUB_TESTING_TIMEOUT}
+      PREFECT_API_URL: ${INFRAHUB_TESTING_PREFECT_API}
+      # Tracing
+      INFRAHUB_TRACE_ENABLE: ${INFRAHUB_TRACE_ENABLE:-false}
+      INFRAHUB_TRACE_EXPORTER_ENDPOINT:
+      INFRAHUB_TRACE_EXPORTER_PROTOCOL: ${INFRAHUB_TRACE_EXPORTER_PROTOCOL:-grpc}
+      INFRAHUB_TRACE_EXPORTER_TYPE: ${INFRAHUB_TRACE_EXPORTER_TYPE:-console}
+      INFRAHUB_TRACE_INSECURE: ${INFRAHUB_TRACE_INSECURE:-true}
+      OTEL_RESOURCE_ATTRIBUTES:
+    depends_on:
+      - infrahub-server
+    volumes:
+      - "./${INFRAHUB_TESTING_LOCAL_REMOTE_GIT_DIRECTORY}:${INFRAHUB_TESTING_INTERNAL_REMOTE_GIT_DIRECTORY}"
+    tty: true
+
+  cadvisor:
+    image: "${CADVISOR_DOCKER_IMAGE:-gcr.io/cadvisor/cadvisor:v0.51.0}"
+    command:
+      - -disable_root_cgroup_stats=true
+      - -docker_only=true
+      - -store_container_labels=false
+      - -whitelisted_container_labels=com.docker.compose.project
+    privileged: true
+    volumes:
+      - /:/rootfs:ro
+      - /var/run:/var/run:ro
+      - /sys:/sys:ro
+      - /var/lib/docker:/var/lib/docker:ro
+      - /dev/disk/:/dev/disk:ro
+    ports:
+      - "${INFRAHUB_TESTING_CADVISOR_PORT:-0}:8080"
+
+  scraper:
+    image: "${SCRAPER_DOCKER_IMAGE:-victoriametrics/victoria-metrics:v1.110.0}"
+    volumes:
+      - vmdata:/victoria-metrics-data
+      - ./prometheus.yml:/etc/prometheus/prometheus.yml:ro
+    command:
+      - "--promscrape.config=/etc/prometheus/prometheus.yml"
+    ports:
+      - ${INFRAHUB_TESTING_SCRAPER_PORT:-0}:8428
+    healthcheck:
+      test: wget -qO- http://127.0.0.1:8428/-/healthy
+      start_period: 10s
+      interval: 5s
+      timeout: 5s
+      retries: 10
+
+volumes:
+  database_data:
+  database_logs:
+  database_data_core2:
+  database_logs_core2:
+  database_data_core3:
+  database_logs_core3:
+  storage_data:
+  workflow_db:
+  vmdata:
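
For context only: the three Neo4j members above discover each other through NEO4J_dbms_cluster_discovery_endpoints and advertise bolt on <service>:7687. A rough sketch of the same "are all members online?" probe that database_restore_backup runs through cypher-shell, written here against the official neo4j Python driver; the driver dependency and the bolt URI (reachable only from inside the compose network) are assumptions, not part of this package.

from neo4j import GraphDatabase  # assumption: neo4j driver installed in the test environment

# The URI is a placeholder for wherever the "database" service's bolt port is reachable.
driver = GraphDatabase.driver("neo4j://database:7687", auth=("neo4j", "admin"))
with driver.session(database="system") as session:
    # Same query that database_restore_backup issues via cypher-shell.
    result = session.run(
        "SHOW DATABASES YIELD name, address, currentStatus "
        "WHERE name = 'system' RETURN address, currentStatus"
    )
    statuses = {record["address"]: record["currentStatus"] for record in result}
driver.close()

# Expect one entry per member (database, database-core2, database-core3), all "online".
print(statuses)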