infrahub-server 1.2.10__py3-none-any.whl → 1.2.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. infrahub/config.py +9 -0
  2. infrahub/core/constants/database.py +1 -0
  3. infrahub/core/constants/infrahubkind.py +1 -0
  4. infrahub/core/constraint/node/runner.py +1 -1
  5. infrahub/core/diff/query/save.py +75 -45
  6. infrahub/core/diff/query_parser.py +5 -1
  7. infrahub/core/diff/tasks.py +3 -3
  8. infrahub/core/graph/__init__.py +1 -1
  9. infrahub/core/migrations/graph/__init__.py +6 -0
  10. infrahub/core/migrations/graph/m029_duplicates_cleanup.py +680 -0
  11. infrahub/core/migrations/graph/m030_illegal_edges.py +83 -0
  12. infrahub/core/migrations/query/attribute_add.py +13 -9
  13. infrahub/core/node/resource_manager/ip_address_pool.py +6 -2
  14. infrahub/core/node/resource_manager/ip_prefix_pool.py +6 -2
  15. infrahub/core/protocols.py +4 -0
  16. infrahub/core/query/diff.py +7 -0
  17. infrahub/core/schema/definitions/core/__init__.py +8 -1
  18. infrahub/core/schema/definitions/core/resource_pool.py +20 -0
  19. infrahub/core/schema/schema_branch.py +5 -3
  20. infrahub/core/validators/tasks.py +1 -1
  21. infrahub/database/__init__.py +5 -4
  22. infrahub/database/validation.py +101 -0
  23. infrahub/graphql/app.py +1 -1
  24. infrahub/graphql/loaders/node.py +1 -1
  25. infrahub/graphql/loaders/peers.py +1 -1
  26. infrahub/graphql/mutations/main.py +1 -1
  27. infrahub/graphql/mutations/proposed_change.py +1 -1
  28. infrahub/graphql/queries/relationship.py +1 -1
  29. infrahub/graphql/queries/task.py +10 -0
  30. infrahub/graphql/resolvers/many_relationship.py +4 -4
  31. infrahub/graphql/resolvers/resolver.py +4 -4
  32. infrahub/graphql/resolvers/single_relationship.py +2 -2
  33. infrahub/graphql/subscription/graphql_query.py +2 -2
  34. infrahub/graphql/types/branch.py +1 -1
  35. infrahub/graphql/types/task_log.py +3 -2
  36. infrahub/message_bus/operations/refresh/registry.py +4 -4
  37. infrahub/message_bus/operations/requests/proposed_change.py +4 -4
  38. infrahub/patch/queries/delete_duplicated_edges.py +40 -29
  39. infrahub/task_manager/task.py +44 -4
  40. infrahub/telemetry/database.py +1 -1
  41. infrahub/telemetry/tasks.py +1 -1
  42. infrahub/webhook/tasks.py +2 -1
  43. {infrahub_server-1.2.10.dist-info → infrahub_server-1.2.12.dist-info}/METADATA +3 -3
  44. {infrahub_server-1.2.10.dist-info → infrahub_server-1.2.12.dist-info}/RECORD +52 -49
  45. {infrahub_server-1.2.10.dist-info → infrahub_server-1.2.12.dist-info}/WHEEL +1 -1
  46. infrahub_testcontainers/container.py +239 -64
  47. infrahub_testcontainers/docker-compose-cluster.test.yml +321 -0
  48. infrahub_testcontainers/docker-compose.test.yml +1 -0
  49. infrahub_testcontainers/helpers.py +15 -1
  50. infrahub_testcontainers/plugin.py +9 -0
  51. infrahub/patch/queries/consolidate_duplicated_nodes.py +0 -109
  52. {infrahub_server-1.2.10.dist-info → infrahub_server-1.2.12.dist-info}/LICENSE.txt +0 -0
  53. {infrahub_server-1.2.10.dist-info → infrahub_server-1.2.12.dist-info}/entry_points.txt +0 -0
infrahub_testcontainers/docker-compose-cluster.test.yml (new file)
@@ -0,0 +1,321 @@
+ ---
+ # yamllint disable rule:line-length
+ # The following environment variables are part of the Infrahub configuration options.
+ # For detailed information on these configuration options, please refer to the Infrahub documentation:
+ # https://docs.infrahub.app/reference/configuration
+ x-neo4j-config-common: &neo4j-config-common
+   NEO4J_AUTH: neo4j/admin
+   NEO4J_dbms_security_procedures_unrestricted: apoc.*
+   NEO4J_dbms_security_auth__minimum__password__length: 4
+   NEO4J_ACCEPT_LICENSE_AGREEMENT: 'yes'
+   NEO4J_server_backup_enabled: true
+   NEO4J_metrics_prometheus_enabled: true
+   NEO4J_server_metrics_filter: '*'
+   NEO4J_server_cluster_system__database__mode: PRIMARY
+   NEO4J_initial_server_mode__constraint: PRIMARY
+   NEO4J_dbms_cluster_discovery_endpoints: database:5000,database-core2:5000,database-core3:5000
+   NEO4J_initial_dbms_default__primaries__count: 3
+   NEO4J_dbms_memory_heap_initial__size: ${INFRAHUB_TESTING_DB_HEAP_INITIAL_SIZE}
+   NEO4J_dbms_memory_heap_max__size: ${INFRAHUB_TESTING_DB_HEAP_MAX_SIZE}
+   NEO4J_server_memory_pagecache_size: ${INFRAHUB_TESTING_DB_PAGECACHE_SIZE}
+
+
+ services:
+   message-queue:
+     image: ${MESSAGE_QUEUE_DOCKER_IMAGE:-rabbitmq:3.13.7-management}
+     restart: unless-stopped
+     environment:
+       RABBITMQ_DEFAULT_USER: infrahub
+       RABBITMQ_DEFAULT_PASS: infrahub
+     healthcheck:
+       test: rabbitmq-diagnostics -q check_port_connectivity
+       interval: 5s
+       timeout: 30s
+       retries: 10
+       start_period: 3s
+     ports:
+       - ${INFRAHUB_TESTING_MESSAGE_QUEUE_PORT:-0}:15692
+
+   cache:
+     image: ${CACHE_DOCKER_IMAGE:-redis:7.2.4}
+     restart: unless-stopped
+     healthcheck:
+       test: ["CMD-SHELL", "redis-cli ping | grep PONG"]
+       interval: 5s
+       timeout: 5s
+       retries: 3
+
+   infrahub-server-lb:
+     image: haproxy:3.1-alpine
+     volumes:
+       - ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg
+     depends_on:
+       infrahub-server:
+         condition: service_started
+     healthcheck:
+       test: wget -O /dev/null http://127.0.0.1:8000/api/config || exit 1
+       interval: 5s
+       timeout: 5s
+       retries: 20
+       start_period: 10s
+     ports:
+       - ${INFRAHUB_TESTING_SERVER_PORT:-0}:8000
+
+   database:
+     deploy:
+       resources:
+         limits:
+           cpus: ${INFRAHUB_TESTING_DB_CPU_LIMIT}
+           memory: ${INFRAHUB_TESTING_DB_MEMORY_LIMIT}
+     image: "${DATABASE_DOCKER_IMAGE:-neo4j:5.20.0-enterprise}"
+     restart: unless-stopped
+     environment:
+       <<: *neo4j-config-common
+       NEO4J_metrics_prometheus_endpoint: 0.0.0.0:2004
+       NEO4J_server_backup_listen__address: 0.0.0.0:6362
+       NEO4J_server_discovery_advertised__address: database:5000
+       NEO4J_server_cluster_advertised__address: database:6000
+       NEO4J_server_cluster_raft_advertised__address: database:7000
+       NEO4J_server_bolt_advertised__address: database:7687
+       NEO4J_server_http_advertised__address: database:7474
+       NEO4J_server_https_advertised__address: database:7473
+     volumes:
+       - "database_data:/data"
+       - "database_logs:/logs"
+       - "./${INFRAHUB_TESTING_LOCAL_DB_BACKUP_DIRECTORY}:${INFRAHUB_TESTING_INTERNAL_DB_BACKUP_DIRECTORY}"
+     healthcheck:
+       test: wget http://localhost:7474 || exit 1
+       interval: 2s
+       timeout: 10s
+       retries: 20
+       start_period: 3s
+     ports:
+       - ${INFRAHUB_TESTING_DATABASE_PORT:-0}:6362
+       - ${INFRAHUB_TESTING_DATABASE_UI_PORT:-0}:7474
+
+   database-core2:
+     deploy:
+       resources:
+         limits:
+           cpus: ${INFRAHUB_TESTING_DB_CPU_LIMIT}
+           memory: ${INFRAHUB_TESTING_DB_MEMORY_LIMIT}
+     image: "${DATABASE_DOCKER_IMAGE:-neo4j:5.20.0-enterprise}"
+     environment:
+       <<: *neo4j-config-common
+       NEO4J_metrics_prometheus_endpoint: 0.0.0.0:2005
+       NEO4J_server_backup_listen__address: 0.0.0.0:6363
+       NEO4J_server_discovery_advertised__address: database-core2:5000
+       NEO4J_server_cluster_advertised__address: database-core2:6000
+       NEO4J_server_cluster_raft_advertised__address: database-core2:7000
+       NEO4J_server_bolt_advertised__address: database-core2:7687
+       NEO4J_server_http_advertised__address: database-core2:7474
+       NEO4J_server_https_advertised__address: database-core2:7473
+     volumes:
+       - "./plugins:/plugins"
+       - "database_data_core2:/data"
+       - "database_logs_core2:/logs"
+     healthcheck:
+       test: wget http://localhost:7474 || exit 1
+       interval: 5s
+       timeout: 10s
+       retries: 40
+       start_period: 30s
+     labels:
+       infrahub_role: "database"
+       com.github.run_id: "${GITHUB_RUN_ID:-unknown}"
+       com.github.job: "${JOB_NAME:-unknown}"
+     ports:
+       - "${INFRAHUB_TESTING_DATABASE_PORT:-0}:6363"
+
+   database-core3:
+     deploy:
+       resources:
+         limits:
+           cpus: ${INFRAHUB_TESTING_DB_CPU_LIMIT}
+           memory: ${INFRAHUB_TESTING_DB_MEMORY_LIMIT}
+     image: "${DATABASE_DOCKER_IMAGE:-neo4j:5.20.0-enterprise}"
+     environment:
+       <<: *neo4j-config-common
+       NEO4J_metrics_prometheus_endpoint: 0.0.0.0:2006
+       NEO4J_server_backup_listen__address: 0.0.0.0:6364
+       NEO4J_server_discovery_advertised__address: database-core3:5000
+       NEO4J_server_cluster_advertised__address: database-core3:6000
+       NEO4J_server_cluster_raft_advertised__address: database-core3:7000
+       NEO4J_server_bolt_advertised__address: database-core3:7687
+       NEO4J_server_http_advertised__address: database-core3:7474
+       NEO4J_server_https_advertised__address: database-core3:7473
+     volumes:
+       - "./plugins:/plugins"
+       - "database_data_core3:/data"
+       - "database_logs_core3:/logs"
+     healthcheck:
+       test: wget http://localhost:7474 || exit 1
+       interval: 5s
+       timeout: 10s
+       retries: 40
+       start_period: 30s
+     labels:
+       infrahub_role: "database"
+       com.github.run_id: "${GITHUB_RUN_ID:-unknown}"
+       com.github.job: "${JOB_NAME:-unknown}"
+     ports:
+       - "${INFRAHUB_TESTING_DATABASE_PORT:-0}:6364"
+
+   task-manager:
+     image: "${INFRAHUB_TESTING_DOCKER_IMAGE}:${INFRAHUB_TESTING_IMAGE_VERSION}"
+     command: uvicorn --host 0.0.0.0 --port 4200 --factory infrahub.prefect_server.app:create_infrahub_prefect
+     depends_on:
+       task-manager-db:
+         condition: service_healthy
+     environment:
+       PREFECT_UI_ENABLED: "${INFRAHUB_TESTING_PREFECT_UI_ENABLED}" # enabling UI requires permissions, run container as root to enable UI
+       PREFECT_API_DATABASE_CONNECTION_URL: postgresql+asyncpg://postgres:postgres@task-manager-db:5432/prefect
+     healthcheck:
+       test: /usr/local/bin/httpx http://localhost:4200/api/health || exit 1
+       interval: 5s
+       timeout: 5s
+       retries: 20
+       start_period: 10s
+     ports:
+       - ${INFRAHUB_TESTING_TASK_MANAGER_PORT:-0}:4200
+
+   task-manager-db:
+     image: "${POSTGRES_DOCKER_IMAGE:-postgres:16-alpine}"
+     environment:
+       - POSTGRES_USER=postgres
+       - POSTGRES_PASSWORD=postgres
+       - POSTGRES_DB=prefect
+     volumes:
+       - workflow_db:/var/lib/postgresql/data
+     healthcheck:
+       test: ["CMD-SHELL", "pg_isready"]
+       interval: 10s
+       timeout: 5s
+       retries: 5
+
+   infrahub-server:
+     deploy:
+       mode: replicated
+       replicas: ${INFRAHUB_TESTING_API_SERVER_COUNT}
+     image: "${INFRAHUB_TESTING_DOCKER_IMAGE}:${INFRAHUB_TESTING_IMAGE_VERSION}"
+     command: ${INFRAHUB_TESTING_DOCKER_ENTRYPOINT}
+     environment:
+       INFRAHUB_PRODUCTION: ${INFRAHUB_TESTING_PRODUCTION}
+       INFRAHUB_LOG_LEVEL: ${INFRAHUB_TESTING_LOG_LEVEL:-INFO}
+       INFRAHUB_BROKER_ADDRESS: ${INFRAHUB_TESTING_BROKER_ADDRESS}
+       INFRAHUB_CACHE_ADDRESS: ${INFRAHUB_TESTING_CACHE_ADDRESS}
+       INFRAHUB_DB_ADDRESS: ${INFRAHUB_TESTING_DB_ADDRESS}
+       INFRAHUB_DB_PROTOCOL: ${INFRAHUB_TESTING_DB_PROTOCOL:-neo4j}
+       INFRAHUB_WORKFLOW_ADDRESS: ${INFRAHUB_TESTING_WORKFLOW_ADDRESS}
+       INFRAHUB_WORKFLOW_DEFAULT_WORKER_TYPE: ${INFRAHUB_TESTING_WORKFLOW_DEFAULT_WORKER_TYPE}
+       INFRAHUB_INITIAL_ADMIN_TOKEN: ${INFRAHUB_TESTING_INITIAL_ADMIN_TOKEN}
+       INFRAHUB_INITIAL_AGENT_TOKEN: ${INFRAHUB_TESTING_INITIAL_AGENT_TOKEN}
+       INFRAHUB_SECURITY_SECRET_KEY: ${INFRAHUB_TESTING_SECURITY_SECRET_KEY}
+       PREFECT_API_URL: ${INFRAHUB_TESTING_PREFECT_API}
+       # Tracing
+       INFRAHUB_TRACE_ENABLE: ${INFRAHUB_TRACE_ENABLE:-false}
+       INFRAHUB_TRACE_EXPORTER_ENDPOINT:
+       INFRAHUB_TRACE_EXPORTER_PROTOCOL: ${INFRAHUB_TRACE_EXPORTER_PROTOCOL:-grpc}
+       INFRAHUB_TRACE_EXPORTER_TYPE: ${INFRAHUB_TRACE_EXPORTER_TYPE:-console}
+       INFRAHUB_TRACE_INSECURE: ${INFRAHUB_TRACE_INSECURE:-true}
+       OTEL_RESOURCE_ATTRIBUTES:
+     depends_on:
+       database:
+         condition: service_healthy
+       database-core2:
+         condition: service_healthy
+       database-core3:
+         condition: service_healthy
+       message-queue:
+         condition: service_healthy
+       cache:
+         condition: service_healthy
+       task-manager:
+         condition: service_healthy
+     volumes:
+       - "storage_data:/opt/infrahub/storage"
+     tty: true
+     healthcheck:
+       test: curl -s -f -o /dev/null http://localhost:8000/api/config || exit 1
+       interval: 5s
+       timeout: 5s
+       retries: 20
+       start_period: 10s
+
+   task-worker:
+     deploy:
+       mode: replicated
+       replicas: ${INFRAHUB_TESTING_TASK_WORKER_COUNT}
+     image: "${INFRAHUB_TESTING_DOCKER_IMAGE}:${INFRAHUB_TESTING_IMAGE_VERSION}"
+     command: prefect worker start --type ${INFRAHUB_TESTING_WORKFLOW_DEFAULT_WORKER_TYPE} --pool infrahub-worker --with-healthcheck
+     environment:
+       INFRAHUB_PRODUCTION: ${INFRAHUB_TESTING_PRODUCTION}
+       INFRAHUB_LOG_LEVEL: ${INFRAHUB_TESTING_LOG_LEVEL}
+       INFRAHUB_GIT_REPOSITORIES_DIRECTORY: ${INFRAHUB_TESTING_GIT_REPOSITORIES_DIRECTORY}
+       INFRAHUB_API_TOKEN: ${INFRAHUB_TESTING_INITIAL_AGENT_TOKEN}
+       INFRAHUB_SECURITY_SECRET_KEY: ${INFRAHUB_TESTING_SECURITY_SECRET_KEY}
+       INFRAHUB_ADDRESS: ${INFRAHUB_TESTING_ADDRESS}
+       INFRAHUB_INTERNAL_ADDRESS: ${INFRAHUB_TESTING_INTERNAL_ADDRESS}
+       INFRAHUB_BROKER_ADDRESS: ${INFRAHUB_TESTING_BROKER_ADDRESS}
+       INFRAHUB_CACHE_ADDRESS: ${INFRAHUB_TESTING_CACHE_ADDRESS}
+       INFRAHUB_DB_ADDRESS: ${INFRAHUB_TESTING_DB_ADDRESS:-database}
+       INFRAHUB_DB_PROTOCOL: ${INFRAHUB_TESTING_DB_PROTOCOL:-neo4j}
+       INFRAHUB_WORKFLOW_ADDRESS: ${INFRAHUB_TESTING_WORKFLOW_ADDRESS}
+       INFRAHUB_TIMEOUT: ${INFRAHUB_TESTING_TIMEOUT}
+       PREFECT_API_URL: ${INFRAHUB_TESTING_PREFECT_API}
+       # Tracing
+       INFRAHUB_TRACE_ENABLE: ${INFRAHUB_TRACE_ENABLE:-false}
+       INFRAHUB_TRACE_EXPORTER_ENDPOINT:
+       INFRAHUB_TRACE_EXPORTER_PROTOCOL: ${INFRAHUB_TRACE_EXPORTER_PROTOCOL:-grpc}
+       INFRAHUB_TRACE_EXPORTER_TYPE: ${INFRAHUB_TRACE_EXPORTER_TYPE:-console}
+       INFRAHUB_TRACE_INSECURE: ${INFRAHUB_TRACE_INSECURE:-true}
+       OTEL_RESOURCE_ATTRIBUTES:
+     depends_on:
+       - infrahub-server
+     volumes:
+       - "./${INFRAHUB_TESTING_LOCAL_REMOTE_GIT_DIRECTORY}:${INFRAHUB_TESTING_INTERNAL_REMOTE_GIT_DIRECTORY}"
+     tty: true
+
+   cadvisor:
+     image: "${CADVISOR_DOCKER_IMAGE:-gcr.io/cadvisor/cadvisor:v0.51.0}"
+     command:
+       - -disable_root_cgroup_stats=true
+       - -docker_only=true
+       - -store_container_labels=false
+       - -whitelisted_container_labels=com.docker.compose.project
+     privileged: true
+     volumes:
+       - /:/rootfs:ro
+       - /var/run:/var/run:ro
+       - /sys:/sys:ro
+       - /var/lib/docker:/var/lib/docker:ro
+       - /dev/disk/:/dev/disk:ro
+     ports:
+       - "${INFRAHUB_TESTING_CADVISOR_PORT:-0}:8080"
+
+   scraper:
+     image: "${SCRAPER_DOCKER_IMAGE:-victoriametrics/victoria-metrics:v1.110.0}"
+     volumes:
+       - vmdata:/victoria-metrics-data
+       - ./prometheus.yml:/etc/prometheus/prometheus.yml:ro
+     command:
+       - "--promscrape.config=/etc/prometheus/prometheus.yml"
+     ports:
+       - ${INFRAHUB_TESTING_SCRAPER_PORT:-0}:8428
+     healthcheck:
+       test: wget -qO- http://127.0.0.1:8428/-/healthy
+       start_period: 10s
+       interval: 5s
+       timeout: 5s
+       retries: 10
+
+ volumes:
+   database_data:
+   database_logs:
+   database_data_core2:
+   database_logs_core2:
+   database_data_core3:
+   database_logs_core3:
+   storage_data:
+   workflow_db:
+   vmdata:
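
For orientation, a minimal sketch of standing this cluster topology up by hand, outside the testcontainers plugin. Only a few of the INFRAHUB_TESTING_* variables the file interpolates are shown, and every value, including the registry path, is an illustrative placeholder; in normal use infrahub_testcontainers renders this file and supplies the full environment itself.

import os
import subprocess

# Illustrative values only; the compose file references many more
# INFRAHUB_TESTING_* variables (tokens, addresses, entrypoint, ...).
env = {
    **os.environ,
    # Neo4j sizing consumed by the x-neo4j-config-common anchor and deploy limits
    "INFRAHUB_TESTING_DB_HEAP_INITIAL_SIZE": "1G",
    "INFRAHUB_TESTING_DB_HEAP_MAX_SIZE": "2G",
    "INFRAHUB_TESTING_DB_PAGECACHE_SIZE": "1G",
    "INFRAHUB_TESTING_DB_CPU_LIMIT": "2",
    "INFRAHUB_TESTING_DB_MEMORY_LIMIT": "4G",
    # Image under test (hypothetical registry path) and replica counts
    "INFRAHUB_TESTING_DOCKER_IMAGE": "registry.opsmill.io/opsmill/infrahub",
    "INFRAHUB_TESTING_IMAGE_VERSION": "1.2.12",
    "INFRAHUB_TESTING_API_SERVER_COUNT": "2",
    "INFRAHUB_TESTING_TASK_WORKER_COUNT": "2",
}

# "--wait" blocks until the healthchecks defined in the file report healthy.
subprocess.run(
    ["docker", "compose", "-f", "docker-compose-cluster.test.yml", "up", "-d", "--wait"],
    check=True,
    env=env,
)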
infrahub_testcontainers/docker-compose.test.yml
@@ -59,6 +59,7 @@ services:
        NEO4J_ACCEPT_LICENSE_AGREEMENT: "yes"
        NEO4J_server_memory_heap_initial__size: ${INFRAHUB_TESTING_DB_HEAP_INITIAL_SIZE}
        NEO4J_server_memory_heap_max__size: ${INFRAHUB_TESTING_DB_HEAP_MAX_SIZE}
+       NEO4J_server_memory_pagecache_size: ${INFRAHUB_TESTING_DB_PAGECACHE_SIZE}
      volumes:
        - "database_data:/data"
        - "database_logs:/logs"
infrahub_testcontainers/helpers.py
@@ -1,5 +1,6 @@
  import os
  import subprocess # noqa: S404
+ import warnings
  from pathlib import Path

  import pytest
@@ -59,6 +60,10 @@ class TestInfrahubDocker:
      def default_branch(self) -> str:
          return "main"

+     @pytest.fixture(scope="class")
+     def deployment_type(self, request: pytest.FixtureRequest) -> str | None:
+         return request.config.getoption(name="infrahub_deployment_type", default=None)
+
      @pytest.fixture(scope="class")
      def infrahub_compose(
          self,
@@ -66,12 +71,21 @@ class TestInfrahubDocker:
          remote_repos_dir: Path, # initialize repository before running docker compose to fix permissions issues # noqa: ARG002
          remote_backups_dir: Path, # noqa: ARG002
          infrahub_version: str,
+         deployment_type: str | None,
      ) -> InfrahubDockerCompose:
-         return InfrahubDockerCompose.init(directory=tmp_directory, version=infrahub_version)
+         return InfrahubDockerCompose.init(
+             directory=tmp_directory, version=infrahub_version, deployment_type=deployment_type
+         )

      @pytest.fixture(scope="class")
      def infrahub_app(self, request: pytest.FixtureRequest, infrahub_compose: InfrahubDockerCompose) -> dict[str, int]:
+         tests_failed_before_class = request.session.testsfailed
+
          def cleanup() -> None:
+             tests_failed_during_class = request.session.testsfailed - tests_failed_before_class
+             if tests_failed_during_class > 0:
+                 stdout, stderr = infrahub_compose.get_logs("infrahub-server", "task-worker")
+                 warnings.warn(f"Container logs:\nStdout:\n{stdout}\nStderr:\n{stderr}", stacklevel=2)
              infrahub_compose.stop()

          request.addfinalizer(cleanup)
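
Together with the plugin hunk below, these changes thread an optional deployment type from the pytest command line into InfrahubDockerCompose.init(), and dump the infrahub-server and task-worker container logs as a warning whenever a test in the class failed. A minimal sketch of a consuming test class, assuming only the names visible in this diff (the assertion is illustrative):

from infrahub_testcontainers.helpers import TestInfrahubDocker


class TestClusterSmoke(TestInfrahubDocker):
    def test_stack_started(self, infrahub_app: dict[str, int]) -> None:
        # infrahub_app builds the compose project via infrahub_compose
        # (cluster or single-node, depending on deployment_type) and
        # returns a mapping of exposed ports; its exact keys are
        # deployment-specific and not shown in this diff.
        assert infrahub_app is not None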
infrahub_testcontainers/plugin.py
@@ -13,6 +13,15 @@ if TYPE_CHECKING:
  def pytest_addoption(parser: pytest.Parser) -> None:
      group = parser.getgroup("infrahub-performance-test")

+     group.addoption(
+         "--deployment-type",
+         action="store",
+         dest="infrahub_deployment_type",
+         default=None,
+         metavar="INFRAHUB_DEPLOYMENT_TYPE",
+         help="Type of deployment to use (default: None, options: cluster)",
+     )
+
      group.addoption(
          "--performance-result-address",
          action="store",
infrahub/patch/queries/consolidate_duplicated_nodes.py (deleted)
@@ -1,109 +0,0 @@
- from ..models import EdgeToAdd, EdgeToDelete, PatchPlan, VertexToDelete
- from .base import PatchQuery
-
-
- class ConsolidateDuplicatedNodesPatchQuery(PatchQuery):
-     """
-     Find any groups of nodes with the same labels and properties, move all the edges to one of the duplicated nodes,
-     then delete the other duplicated nodes
-     """
-
-     @property
-     def name(self) -> str:
-         return "consolidate-duplicated-nodes"
-
-     async def plan(self) -> PatchPlan:
-         query = """
-         //------------
-         // Find nodes with the same labels and UUID
-         //------------
-         MATCH (n:Node)
-         WITH n.uuid AS node_uuid, count(*) as num_nodes_with_uuid
-         WHERE num_nodes_with_uuid > 1
-         WITH DISTINCT node_uuid
-         MATCH (n:Node {uuid: node_uuid})
-         CALL {
-             WITH n
-             WITH labels(n) AS n_labels
-             UNWIND n_labels AS n_label
-             WITH n_label
-             ORDER BY n_label ASC
-             RETURN collect(n_label) AS sorted_labels
-         }
-         WITH n.uuid AS n_uuid, sorted_labels, collect(n) AS duplicate_nodes
-         WHERE size(duplicate_nodes) > 1
-         WITH n_uuid, head(duplicate_nodes) AS node_to_keep, tail(duplicate_nodes) AS nodes_to_delete
-         UNWIND nodes_to_delete AS node_to_delete
-         //------------
-         // Find the edges that we need to move to the selected node_to_keep
-         //------------
-         CALL {
-             WITH node_to_keep, node_to_delete
-             MATCH (node_to_delete)-[edge_to_delete]->(peer)
-             RETURN {
-                 from_id: %(id_func_name)s(node_to_keep),
-                 to_id: %(id_func_name)s(peer),
-                 edge_type: type(edge_to_delete),
-                 after_props: properties(edge_to_delete)
-             } AS edge_to_create
-             UNION
-             WITH node_to_keep, node_to_delete
-             MATCH (node_to_delete)<-[edge_to_delete]-(peer)
-             RETURN {
-                 from_id: %(id_func_name)s(peer),
-                 to_id: %(id_func_name)s(node_to_keep),
-                 edge_type: type(edge_to_delete),
-                 after_props: properties(edge_to_delete)
-             } AS edge_to_create
-         }
-         WITH node_to_delete, collect(edge_to_create) AS edges_to_create
-         //------------
-         // Find the edges that we need to remove from the duplicated nodes
-         //------------
-         CALL {
-             WITH node_to_delete
-             MATCH (node_to_delete)-[e]->(peer)
-             RETURN {
-                 db_id: %(id_func_name)s(e),
-                 from_id: %(id_func_name)s(node_to_delete),
-                 to_id: %(id_func_name)s(peer),
-                 edge_type: type(e),
-                 before_props: properties(e)
-             } AS edge_to_delete
-             UNION
-             WITH node_to_delete
-             MATCH (node_to_delete)<-[e]-(peer)
-             RETURN {
-                 db_id: %(id_func_name)s(e),
-                 from_id: %(id_func_name)s(peer),
-                 to_id: %(id_func_name)s(node_to_delete),
-                 edge_type: type(e),
-                 before_props: properties(e)
-             } AS edge_to_delete
-         }
-         WITH node_to_delete, edges_to_create, collect(edge_to_delete) AS edges_to_delete
-         RETURN
-             {db_id: %(id_func_name)s(node_to_delete), labels: labels(node_to_delete), before_props: properties(node_to_delete)} AS vertex_to_delete,
-             edges_to_create,
-             edges_to_delete
-         """ % {"id_func_name": self.db.get_id_function_name()}
-         results = await self.db.execute_query(query=query)
-         vertices_to_delete: list[VertexToDelete] = []
-         edges_to_delete: list[EdgeToDelete] = []
-         edges_to_add: list[EdgeToAdd] = []
-         for result in results:
-             serial_vertex_to_delete = result.get("vertex_to_delete")
-             if serial_vertex_to_delete:
-                 vertex_to_delete = VertexToDelete(**serial_vertex_to_delete)
-                 vertices_to_delete.append(vertex_to_delete)
-             for serial_edge_to_delete in result.get("edges_to_delete"):
-                 edge_to_delete = EdgeToDelete(**serial_edge_to_delete)
-                 edges_to_delete.append(edge_to_delete)
-             for serial_edge_to_create in result.get("edges_to_create"):
-                 edges_to_add.append(EdgeToAdd(**serial_edge_to_create))
-         return PatchPlan(
-             name=self.name,
-             vertices_to_delete=vertices_to_delete,
-             edges_to_add=edges_to_add,
-             edges_to_delete=edges_to_delete,
-         )