infrahub-server 1.2.9rc0__py3-none-any.whl → 1.2.11__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (79)
  1. infrahub/computed_attribute/models.py +13 -0
  2. infrahub/computed_attribute/tasks.py +48 -26
  3. infrahub/config.py +9 -0
  4. infrahub/core/attribute.py +43 -2
  5. infrahub/core/branch/models.py +8 -9
  6. infrahub/core/branch/tasks.py +0 -2
  7. infrahub/core/constants/infrahubkind.py +1 -0
  8. infrahub/core/constraint/node/runner.py +1 -1
  9. infrahub/core/diff/calculator.py +65 -11
  10. infrahub/core/diff/combiner.py +38 -31
  11. infrahub/core/diff/coordinator.py +44 -28
  12. infrahub/core/diff/data_check_synchronizer.py +3 -2
  13. infrahub/core/diff/enricher/hierarchy.py +36 -27
  14. infrahub/core/diff/ipam_diff_parser.py +5 -4
  15. infrahub/core/diff/merger/merger.py +46 -16
  16. infrahub/core/diff/merger/serializer.py +1 -0
  17. infrahub/core/diff/model/field_specifiers_map.py +64 -0
  18. infrahub/core/diff/model/path.py +58 -58
  19. infrahub/core/diff/parent_node_adder.py +14 -16
  20. infrahub/core/diff/query/drop_nodes.py +42 -0
  21. infrahub/core/diff/query/field_specifiers.py +8 -7
  22. infrahub/core/diff/query/filters.py +15 -1
  23. infrahub/core/diff/query/merge.py +264 -28
  24. infrahub/core/diff/query/save.py +6 -2
  25. infrahub/core/diff/query_parser.py +55 -65
  26. infrahub/core/diff/repository/deserializer.py +38 -24
  27. infrahub/core/diff/repository/repository.py +31 -12
  28. infrahub/core/diff/tasks.py +3 -3
  29. infrahub/core/graph/__init__.py +1 -1
  30. infrahub/core/migrations/graph/__init__.py +2 -0
  31. infrahub/core/migrations/graph/m027_delete_isolated_nodes.py +50 -0
  32. infrahub/core/migrations/graph/m028_delete_diffs.py +38 -0
  33. infrahub/core/node/resource_manager/ip_address_pool.py +6 -2
  34. infrahub/core/node/resource_manager/ip_prefix_pool.py +6 -2
  35. infrahub/core/protocols.py +4 -0
  36. infrahub/core/query/branch.py +27 -17
  37. infrahub/core/query/diff.py +169 -51
  38. infrahub/core/query/node.py +39 -5
  39. infrahub/core/query/relationship.py +105 -30
  40. infrahub/core/query/subquery.py +2 -2
  41. infrahub/core/relationship/model.py +1 -1
  42. infrahub/core/schema/definitions/core/__init__.py +8 -1
  43. infrahub/core/schema/definitions/core/resource_pool.py +20 -0
  44. infrahub/core/schema/schema_branch.py +3 -0
  45. infrahub/core/validators/tasks.py +1 -1
  46. infrahub/core/validators/uniqueness/query.py +7 -0
  47. infrahub/database/__init__.py +5 -4
  48. infrahub/graphql/app.py +1 -1
  49. infrahub/graphql/loaders/node.py +1 -1
  50. infrahub/graphql/loaders/peers.py +1 -1
  51. infrahub/graphql/mutations/proposed_change.py +1 -1
  52. infrahub/graphql/queries/diff/tree.py +2 -1
  53. infrahub/graphql/queries/relationship.py +1 -1
  54. infrahub/graphql/queries/task.py +10 -0
  55. infrahub/graphql/resolvers/many_relationship.py +4 -4
  56. infrahub/graphql/resolvers/resolver.py +4 -4
  57. infrahub/graphql/resolvers/single_relationship.py +2 -2
  58. infrahub/graphql/subscription/graphql_query.py +2 -2
  59. infrahub/graphql/types/branch.py +1 -1
  60. infrahub/graphql/types/task_log.py +3 -2
  61. infrahub/message_bus/operations/refresh/registry.py +1 -1
  62. infrahub/task_manager/task.py +44 -4
  63. infrahub/telemetry/database.py +1 -1
  64. infrahub/telemetry/tasks.py +1 -1
  65. infrahub/trigger/models.py +11 -1
  66. infrahub/trigger/setup.py +51 -15
  67. infrahub/trigger/tasks.py +1 -4
  68. infrahub/types.py +1 -1
  69. infrahub/webhook/models.py +2 -1
  70. infrahub/workflows/catalogue.py +9 -0
  71. infrahub/workflows/initialization.py +1 -3
  72. infrahub_sdk/timestamp.py +2 -2
  73. {infrahub_server-1.2.9rc0.dist-info → infrahub_server-1.2.11.dist-info}/METADATA +3 -3
  74. {infrahub_server-1.2.9rc0.dist-info → infrahub_server-1.2.11.dist-info}/RECORD +79 -75
  75. infrahub_testcontainers/docker-compose.test.yml +3 -3
  76. infrahub_testcontainers/performance_test.py +6 -3
  77. {infrahub_server-1.2.9rc0.dist-info → infrahub_server-1.2.11.dist-info}/LICENSE.txt +0 -0
  78. {infrahub_server-1.2.9rc0.dist-info → infrahub_server-1.2.11.dist-info}/WHEEL +0 -0
  79. {infrahub_server-1.2.9rc0.dist-info → infrahub_server-1.2.11.dist-info}/entry_points.txt +0 -0
infrahub/task_manager/task.py CHANGED
@@ -35,6 +35,9 @@ from .models import FlowLogs, FlowProgress, RelatedNodesInfo
 
 log = get_logger()
 
+NB_LOGS_LIMIT = 10_000
+PREFECT_MAX_LOGS_PER_CALL = 200
+
 
 class PrefectTask:
     @classmethod
@@ -83,9 +86,42 @@ class PrefectTask:
         return related_nodes
 
     @classmethod
-    async def _get_logs(cls, client: PrefectClient, flow_ids: list[UUID]) -> FlowLogs:
+    async def _get_logs(
+        cls, client: PrefectClient, flow_ids: list[UUID], log_limit: int | None, log_offset: int | None
+    ) -> FlowLogs:
+        """
+        Return the logs for a flow run, based on log_limit and log_offset.
+        At most, NB_LOGS_LIMIT logs will be returned per flow.
+        """
+
         logs_flow = FlowLogs()
-        all_logs = await client.read_logs(log_filter=LogFilter(flow_run_id=LogFilterFlowRunId(any_=flow_ids)))
+
+        log_limit = log_limit if log_limit is not None else NB_LOGS_LIMIT
+        log_offset = log_offset or 0
+        current_offset = log_offset
+
+        if log_limit > NB_LOGS_LIMIT:
+            raise ValueError(f"log_limit cannot be greater than {NB_LOGS_LIMIT}")
+
+        all_logs = []
+
+        # Fetch the logs in batches of PREFECT_MAX_LOGS_PER_CALL, as prefect does not allow to fetch more logs at once.
+        remaining = min(log_limit, NB_LOGS_LIMIT)
+        while remaining > 0:
+            batch_limit = min(PREFECT_MAX_LOGS_PER_CALL, remaining)
+            logs_batch = await client.read_logs(
+                log_filter=LogFilter(flow_run_id=LogFilterFlowRunId(any_=flow_ids)),
+                offset=current_offset,
+                limit=batch_limit,
+            )
+            all_logs.extend(logs_batch)
+            nb_fetched = len(logs_batch)
+            if nb_fetched < batch_limit:
+                break  # No more logs to fetch
+
+            current_offset += nb_fetched
+            remaining -= nb_fetched
+
         for flow_log in all_logs:
             if flow_log.flow_run_id and flow_log.message not in ["Finished in state Completed()"]:
                 logs_flow.logs[flow_log.flow_run_id].append(flow_log)
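Prefect's read_logs is paged, so the new implementation assembles its result in batches. The same cap-and-page pattern, extracted into a standalone sketch (fetch_page and both constants are stand-ins defined here, not Prefect or Infrahub APIs):

```python
from typing import Awaitable, Callable

MAX_TOTAL = 10_000  # overall cap, mirroring NB_LOGS_LIMIT
MAX_PER_CALL = 200  # per-request cap, mirroring PREFECT_MAX_LOGS_PER_CALL

async def fetch_all(
    fetch_page: Callable[[int, int], Awaitable[list[dict]]],  # hypothetical (offset, limit) -> rows
    limit: int | None = None,
    offset: int = 0,
) -> list[dict]:
    limit = limit if limit is not None else MAX_TOTAL
    if limit > MAX_TOTAL:
        raise ValueError(f"limit cannot be greater than {MAX_TOTAL}")

    rows: list[dict] = []
    remaining = limit
    current_offset = offset
    while remaining > 0:
        batch_limit = min(MAX_PER_CALL, remaining)
        batch = await fetch_page(current_offset, batch_limit)
        rows.extend(batch)
        if len(batch) < batch_limit:
            break  # a short page means the source is exhausted
        current_offset += len(batch)
        remaining -= len(batch)
    return rows
```

The early break on a short page is what keeps the loop from issuing one extra empty request once the source runs out.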
@@ -188,6 +224,8 @@ class PrefectTask:
         branch: str | None = None,
         limit: int | None = None,
         offset: int | None = None,
+        log_limit: int | None = None,
+        log_offset: int | None = None,
     ) -> dict[str, Any]:
         nodes: list[dict] = []
         count = None
@@ -219,7 +257,9 @@
             sort=FlowRunSort.START_TIME_DESC,
         )
         if log_fields:
-            logs_flow = await cls._get_logs(client=client, flow_ids=[flow.id for flow in flows])
+            logs_flow = await cls._get_logs(
+                client=client, flow_ids=[flow.id for flow in flows], log_limit=log_limit, log_offset=log_offset
+            )
 
         if "progress" in node_fields:
             progress_flow = await cls._get_progress(client=client, flow_ids=[flow.id for flow in flows])
@@ -265,7 +305,7 @@
                     "updated_at": flow.updated.to_iso8601_string(),  # type: ignore
                     "start_time": flow.start_time.to_iso8601_string() if flow.start_time else None,
                     "id": flow.id,
-                    "logs": {"edges": logs},
+                    "logs": {"edges": logs, "count": len(logs)},
                 }
             }
         )
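Each task node assembled above now reports how many log edges it carries. For reference, one entry of the nodes list might look like this (keys mirror the construction above; the values and the inner edge shape are illustrative, not taken from the diff):

```python
# Illustrative only: keys mirror the construction above, values are invented.
entry = {
    "node": {
        "updated_at": "2025-06-01T10:00:00Z",
        "start_time": "2025-06-01T09:59:42Z",
        "id": "b1946ac9-2a3e-4b5f-8c1d-000000000000",
        "logs": {
            "edges": [{"node": {"message": "Task started"}}],  # assumed edge shape
            "count": 1,  # len(logs) — new in this release
        },
    }
}
```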
infrahub/telemetry/database.py CHANGED
@@ -49,7 +49,7 @@ async def get_system_info(db: InfrahubDatabase) -> TelemetryDatabaseSystemInfoDa
 
 @task(name="telemetry-gather-db", task_run_name="Gather Database Information", cache_policy=NONE)
 async def gather_database_information(db: InfrahubDatabase) -> TelemetryDatabaseData:
-    async with db.start_session() as dbs:
+    async with db.start_session(read_only=True) as dbs:
         server_info = []
         system_info = None
         database_type = db.db_type.value
infrahub/telemetry/tasks.py CHANGED
@@ -38,7 +38,7 @@ async def gather_schema_information(branch: Branch) -> TelemetrySchemaData:
 
 @task(name="telemetry-feature-information", task_run_name="Gather Feature Information", cache_policy=NONE)
 async def gather_feature_information(service: InfrahubServices) -> dict[str, int]:
-    async with service.database.start_session() as db:
+    async with service.database.start_session(read_only=True) as db:
         data = {}
         features_to_count = [
             InfrahubKind.ARTIFACT,
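Both telemetry tasks above now open read-only sessions, presumably so the driver can route these statistics queries to a read session (and, in a cluster, away from the writer). The diff only shows the call-site change; the sketch below assumes a hypothetical query helper on the session object:

```python
# Sketch only: start_session(read_only=True) is taken from this diff;
# dbs.run_query is a hypothetical helper, not a confirmed Infrahub API.
async def count_all_nodes(db: InfrahubDatabase) -> int:
    async with db.start_session(read_only=True) as dbs:
        results = await dbs.run_query("MATCH (n) RETURN count(n) AS nb")
        return results[0]["nb"]
```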
infrahub/trigger/models.py CHANGED
@@ -5,8 +5,11 @@ from enum import Enum
 from typing import TYPE_CHECKING, Any
 
 from prefect.events.actions import RunDeployment
+from prefect.events.schemas.automations import (
+    Automation,  # noqa: TC002
+    Posture,
+)
 from prefect.events.schemas.automations import EventTrigger as PrefectEventTrigger
-from prefect.events.schemas.automations import Posture
 from prefect.events.schemas.events import ResourceSpecification
 from pydantic import BaseModel, Field
 
@@ -19,6 +22,13 @@ if TYPE_CHECKING:
     from uuid import UUID
 
 
+class TriggerSetupReport(BaseModel):
+    created: list[TriggerDefinition] = Field(default_factory=list)
+    updated: list[TriggerDefinition] = Field(default_factory=list)
+    deleted: list[Automation] = Field(default_factory=list)
+    unchanged: list[TriggerDefinition] = Field(default_factory=list)
+
+
 class TriggerType(str, Enum):
     BUILTIN = "builtin"
     WEBHOOK = "webhook"
infrahub/trigger/setup.py CHANGED
@@ -5,27 +5,43 @@ from prefect.automations import AutomationCore
 from prefect.cache_policies import NONE
 from prefect.client.orchestration import PrefectClient
 from prefect.client.schemas.filters import DeploymentFilter, DeploymentFilterName
+from prefect.events.schemas.automations import Automation
 
 from infrahub.trigger.models import TriggerDefinition
 
-from .models import TriggerType
+from .models import TriggerSetupReport, TriggerType
 
 if TYPE_CHECKING:
     from uuid import UUID
 
 
+def compare_automations(target: AutomationCore, existing: Automation) -> bool:
+    """Compare an AutomationCore with an existing Automation object to identify if they are identical or not
+
+    Return True if the target is identical to the existing automation
+    """
+
+    target_dump = target.model_dump(exclude_defaults=True, exclude_none=True)
+    existing_dump = existing.model_dump(exclude_defaults=True, exclude_none=True, exclude={"id"})
+
+    return target_dump == existing_dump
+
+
 @task(name="trigger-setup", task_run_name="Setup triggers", cache_policy=NONE)  # type: ignore[arg-type]
 async def setup_triggers(
     client: PrefectClient,
     triggers: list[TriggerDefinition],
     trigger_type: TriggerType | None = None,
-) -> None:
+    force_update: bool = False,
+) -> TriggerSetupReport:
     log = get_run_logger()
 
+    report = TriggerSetupReport()
+
     if trigger_type:
-        log.info(f"Setting up triggers of type {trigger_type.value}")
+        log.debug(f"Setting up triggers of type {trigger_type.value}")
     else:
-        log.info("Setting up all triggers")
+        log.debug("Setting up all triggers")
 
     # -------------------------------------------------------------
     # Retrieve existing Deployments and Automation from the server
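compare_automations is what makes setup_triggers idempotent: dumping both sides with exclude_defaults/exclude_none (and ignoring the server-assigned id) means two automations that differ only in unset or defaulted fields compare as equal. A minimal pydantic illustration of that dump semantics (toy model, not Prefect's):

```python
from pydantic import BaseModel

class Toy(BaseModel):
    name: str
    enabled: bool = True
    description: str | None = None

a = Toy(name="automation-1")
b = Toy(name="automation-1", enabled=True)  # explicitly set to its default

# exclude_defaults drops fields equal to their default, exclude_none drops Nones,
# so fields the caller never set stay out of the comparison entirely.
assert (
    a.model_dump(exclude_defaults=True, exclude_none=True)
    == b.model_dump(exclude_defaults=True, exclude_none=True)
    == {"name": "automation-1"}
)
```

Note that in this release the built-in setup paths still pass force_update=True (see initialization.py and trigger/tasks.py below), so the comparison mainly benefits callers that opt out of forced rewrites.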
@@ -38,23 +54,24 @@ async def setup_triggers(
         )
     }
     deployments_mapping: dict[str, UUID] = {name: item.id for name, item in deployments.items()}
-    existing_automations = {item.name: item for item in await client.read_automations()}
 
     # If a trigger type is provided, narrow down the list of existing triggers to know which one to delete
+    existing_automations: dict[str, Automation] = {}
     if trigger_type:
-        trigger_automations = [
-            item.name for item in await client.read_automations() if item.name.startswith(trigger_type.value)
-        ]
+        existing_automations = {
+            item.name: item for item in await client.read_automations() if item.name.startswith(trigger_type.value)
+        }
     else:
-        trigger_automations = [item.name for item in await client.read_automations()]
+        existing_automations = {item.name: item for item in await client.read_automations()}
 
     trigger_names = [trigger.generate_name() for trigger in triggers]
+    automation_names = list(existing_automations.keys())
 
-    log.debug(f"{len(trigger_automations)} existing triggers ({trigger_automations})")
-    log.debug(f"{len(trigger_names)} triggers to configure ({trigger_names})")
+    log.debug(f"{len(automation_names)} existing triggers ({automation_names})")
+    log.debug(f"{len(trigger_names)} triggers to configure ({trigger_names})")
 
-    to_delete = set(trigger_automations) - set(trigger_names)
-    log.debug(f"{len(trigger_names)} triggers to delete ({to_delete})")
+    to_delete = set(automation_names) - set(trigger_names)
+    log.debug(f"{len(to_delete)} triggers to delete ({to_delete})")
 
     # -------------------------------------------------------------
     # Create or Update all triggers
@@ -71,11 +88,16 @@ async def setup_triggers(
         existing_automation = existing_automations.get(trigger.generate_name(), None)
 
         if existing_automation:
-            await client.update_automation(automation_id=existing_automation.id, automation=automation)
-            log.info(f"{trigger.generate_name()} Updated")
+            if force_update or not compare_automations(target=automation, existing=existing_automation):
+                await client.update_automation(automation_id=existing_automation.id, automation=automation)
+                log.info(f"{trigger.generate_name()} Updated")
+                report.updated.append(trigger)
+            else:
+                report.unchanged.append(trigger)
         else:
             await client.create_automation(automation=automation)
             log.info(f"{trigger.generate_name()} Created")
+            report.created.append(trigger)
 
     # -------------------------------------------------------------
     # Delete Triggers that shouldn't be there
@@ -86,5 +108,19 @@ async def setup_triggers(
         if not existing_automation:
             continue
 
+        report.deleted.append(existing_automation)
         await client.delete_automation(automation_id=existing_automation.id)
         log.info(f"{item_to_delete} Deleted")
+
+    if trigger_type:
+        log.info(
+            f"Processed triggers of type {trigger_type.value}: "
+            f"{len(report.created)} created, {len(report.updated)} updated, {len(report.unchanged)} unchanged, {len(report.deleted)} deleted"
+        )
+    else:
+        log.info(
+            f"Processed all triggers: "
+            f"{len(report.created)} created, {len(report.updated)} updated, {len(report.unchanged)} unchanged, {len(report.deleted)} deleted"
+        )
+
+    return report
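Because setup_triggers now returns the report, callers can inspect what the reconciliation actually did. A hedged usage sketch (assuming Prefect's get_client, as used in trigger/tasks.py below):

```python
from prefect.client.orchestration import get_client

async def refresh_triggers(triggers: list[TriggerDefinition]) -> None:
    async with get_client() as client:  # async PrefectClient
        report = await setup_triggers(client=client, triggers=triggers, force_update=False)
        # TriggerSetupReport fields come straight from the diff above.
        print(
            f"{len(report.created)} created, {len(report.updated)} updated, "
            f"{len(report.unchanged)} unchanged, {len(report.deleted)} deleted"
        )
```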
infrahub/trigger/tasks.py CHANGED
@@ -31,7 +31,4 @@ async def trigger_configure_all(service: InfrahubServices) -> None:
     )
 
     async with get_client(sync_client=False) as prefect_client:
-        await setup_triggers(
-            client=prefect_client,
-            triggers=triggers,
-        )
+        await setup_triggers(client=prefect_client, triggers=triggers, force_update=True)
infrahub/types.py CHANGED
@@ -366,7 +366,7 @@ ATTRIBUTE_PYTHON_TYPES: dict[str, type] = {
 ATTRIBUTE_KIND_LABELS = list(ATTRIBUTE_TYPES.keys())
 
 # Data types supporting large values, which can therefore not be indexed in neo4j.
-LARGE_ATTRIBUTE_TYPES = [TextArea, JSON]
+LARGE_ATTRIBUTE_TYPES = [TextArea, JSON, List]
 
 
 def get_attribute_type(kind: str = "Default") -> type[InfrahubDataType]:
infrahub/webhook/models.py CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
 import base64
 import hashlib
 import hmac
+import json
 from typing import TYPE_CHECKING, Any
 from uuid import UUID, uuid4
 
@@ -170,7 +171,7 @@ class StandardWebhook(Webhook):
     def _assign_headers(self, uuid: UUID | None = None, at: Timestamp | None = None) -> None:
         message_id = f"msg_{uuid.hex}" if uuid else f"msg_{uuid4().hex}"
         timestamp = str(at.to_timestamp()) if at else str(Timestamp().to_timestamp())
-        payload = self._payload or {}
+        payload = json.dumps(self._payload or {})
        unsigned_data = f"{message_id}.{timestamp}.{payload}".encode()
         signature = self._sign(data=unsigned_data)
 
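The json.dumps change matters because the signature is computed over the serialized payload: before the fix, the f-string interpolated the Python dict repr (single quotes, no stable serialization) into unsigned_data, which could never match the JSON bytes actually sent over the wire, so receiver-side verification would fail. A sketch of the scheme, assuming _sign (not shown in this diff) is an HMAC-SHA256 over the Standard Webhooks msg_id.timestamp.payload string; the secret and payload here are invented:

```python
import base64
import hashlib
import hmac
import json

secret = b"whsec-demo"                    # invented secret
payload_dict = {"event": "node.created"}  # invented payload
message_id, timestamp = "msg_abc123", "1715500000"

# Before the fix: str(dict) -> "{'event': 'node.created'}" (Python repr, single quotes)
# After the fix:  json.dumps -> '{"event": "node.created"}' (what is actually sent)
payload = json.dumps(payload_dict)

unsigned_data = f"{message_id}.{timestamp}.{payload}".encode()
digest = hmac.new(secret, unsigned_data, hashlib.sha256).digest()
signature = f"v1,{base64.b64encode(digest).decode()}"  # Standard Webhooks header format
```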
infrahub/workflows/catalogue.py CHANGED
@@ -251,6 +251,14 @@ COMPUTED_ATTRIBUTE_PROCESS_JINJA2 = WorkflowDefinition(
     tags=[WorkflowTag.DATABASE_CHANGE],
 )
 
+COMPUTED_ATTRIBUTE_JINJA2_UPDATE_VALUE = WorkflowDefinition(
+    name="computed-attribute-jinja2-update-value",
+    type=WorkflowType.CORE,
+    module="infrahub.computed_attribute.tasks",
+    function="computed_attribute_jinja2_update_value",
+    tags=[WorkflowTag.DATABASE_CHANGE],
+)
+
 TRIGGER_UPDATE_JINJA_COMPUTED_ATTRIBUTES = WorkflowDefinition(
     name="trigger_update_jinja2_computed_attributes",
     type=WorkflowType.CORE,
@@ -443,6 +451,7 @@ workflows = [
     BRANCH_MERGE_POST_PROCESS,
     BRANCH_REBASE,
     BRANCH_VALIDATE,
+    COMPUTED_ATTRIBUTE_JINJA2_UPDATE_VALUE,
     COMPUTED_ATTRIBUTE_PROCESS_JINJA2,
     COMPUTED_ATTRIBUTE_PROCESS_TRANSFORM,
     COMPUTED_ATTRIBUTE_SETUP_JINJA2,
infrahub/workflows/initialization.py CHANGED
@@ -71,7 +71,5 @@ async def setup_task_manager() -> None:
     await setup_worker_pools(client=client)
     await setup_deployments(client=client)
     await setup_triggers(
-        client=client,
-        triggers=builtin_triggers,
-        trigger_type=TriggerType.BUILTIN,
+        client=client, triggers=builtin_triggers, trigger_type=TriggerType.BUILTIN, force_update=True
     )
infrahub_sdk/timestamp.py CHANGED
@@ -153,7 +153,7 @@ class Timestamp:
         nanoseconds: int = 0,
         disambiguate: Literal["compatible"] = "compatible",
     ) -> Timestamp:
-        return Timestamp(
+        return self.__class__(
             self._obj.add(
                 years=years,
                 months=months,
@@ -183,7 +183,7 @@ class Timestamp:
         nanoseconds: int = 0,
         disambiguate: Literal["compatible"] = "compatible",
     ) -> Timestamp:
-        return Timestamp(
+        return self.__class__(
             self._obj.subtract(
                 years=years,
                 months=months,
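Returning self.__class__ instead of the hard-coded Timestamp makes add() and subtract() subclass-friendly. A toy illustration (the subclass is hypothetical):

```python
class CustomTimestamp(Timestamp):  # hypothetical subclass
    pass

ts = CustomTimestamp()
shifted = ts.add(years=1)
# Before this fix: type(shifted) is Timestamp; after: the subclass is preserved.
assert isinstance(shifted, CustomTimestamp)
```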
{infrahub_server-1.2.9rc0.dist-info → infrahub_server-1.2.11.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: infrahub-server
-Version: 1.2.9rc0
+Version: 1.2.11
 Summary: Infrahub is taking a new approach to Infrastructure Management by providing a new generation of datastore to organize and control all the data that defines how an infrastructure should run.
 Home-page: https://opsmill.com
 License: AGPL-3.0-only
@@ -77,7 +77,7 @@ Description-Content-Type: text/markdown
 
 Infrahub from [OpsMill](https://opsmill.com) is taking a new approach to Infrastructure Management by providing a new generation of datastore to organize and control all the data that defines how an infrastructure should run. Infrahub offers a central hub to manage the data, templates and playbooks that powers your infrastructure by combining the version control and branch management capabilities similar to Git with the flexible data model and UI of a graph database.
 
-If you just want to try Infrahub out, you can use our [Always-On Sandbox](https://demo.infrahub.app/) to get started.
+If you just want to try Infrahub out, you can use our [Infrahub Sandbox](https://sandbox.infrahub.app/) to get started.
 
 ![infrahub screenshot](docs/docs/media/infrahub-readme.gif)
 
@@ -103,7 +103,7 @@ If you just want to try Infrahub out, you can use our [Always-On Sandbox](https:
 
 ## Quick Start
 
-[Always-On Sandbox](https://demo.infrahub.app/) - Instantly login to the UI of a demo environment of Infrahub with sample data pre-loaded.
+[Infrahub Sandbox](https://sandbox.infrahub.app/) - Instantly login to the UI of a demo environment of Infrahub with sample data pre-loaded.
 
 [Getting Started Environment & Tutorial](https://opsmill.instruqt.com/pages/labs) - It spins up an instance of Infrahub on our cloud, provides a browser, terminal, code editor and walks you through the basic concepts: