infrahub-server 1.4.12__py3-none-any.whl → 1.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- infrahub/actions/tasks.py +208 -16
- infrahub/api/artifact.py +3 -0
- infrahub/api/diff/diff.py +1 -1
- infrahub/api/internal.py +2 -0
- infrahub/api/query.py +2 -0
- infrahub/api/schema.py +27 -3
- infrahub/auth.py +5 -5
- infrahub/cli/__init__.py +2 -0
- infrahub/cli/db.py +160 -157
- infrahub/cli/dev.py +118 -0
- infrahub/cli/tasks.py +46 -0
- infrahub/cli/upgrade.py +56 -9
- infrahub/computed_attribute/tasks.py +19 -7
- infrahub/config.py +7 -2
- infrahub/core/attribute.py +35 -24
- infrahub/core/branch/enums.py +1 -1
- infrahub/core/branch/models.py +9 -5
- infrahub/core/branch/needs_rebase_status.py +11 -0
- infrahub/core/branch/tasks.py +72 -10
- infrahub/core/changelog/models.py +2 -10
- infrahub/core/constants/__init__.py +4 -0
- infrahub/core/constants/infrahubkind.py +1 -0
- infrahub/core/convert_object_type/object_conversion.py +201 -0
- infrahub/core/convert_object_type/repository_conversion.py +89 -0
- infrahub/core/convert_object_type/schema_mapping.py +27 -3
- infrahub/core/diff/calculator.py +2 -2
- infrahub/core/diff/model/path.py +4 -0
- infrahub/core/diff/payload_builder.py +1 -1
- infrahub/core/diff/query/artifact.py +1 -0
- infrahub/core/diff/query/delete_query.py +9 -5
- infrahub/core/diff/query/field_summary.py +1 -0
- infrahub/core/diff/query/merge.py +39 -23
- infrahub/core/graph/__init__.py +1 -1
- infrahub/core/initialization.py +7 -4
- infrahub/core/manager.py +3 -81
- infrahub/core/migrations/__init__.py +3 -0
- infrahub/core/migrations/exceptions.py +4 -0
- infrahub/core/migrations/graph/__init__.py +13 -10
- infrahub/core/migrations/graph/load_schema_branch.py +21 -0
- infrahub/core/migrations/graph/m013_convert_git_password_credential.py +1 -1
- infrahub/core/migrations/graph/m037_index_attr_vals.py +11 -30
- infrahub/core/migrations/graph/m039_ipam_reconcile.py +9 -7
- infrahub/core/migrations/graph/m041_deleted_dup_edges.py +149 -0
- infrahub/core/migrations/graph/m042_profile_attrs_in_db.py +147 -0
- infrahub/core/migrations/graph/m043_create_hfid_display_label_in_db.py +164 -0
- infrahub/core/migrations/graph/m044_backfill_hfid_display_label_in_db.py +864 -0
- infrahub/core/migrations/query/__init__.py +7 -8
- infrahub/core/migrations/query/attribute_add.py +8 -6
- infrahub/core/migrations/query/attribute_remove.py +134 -0
- infrahub/core/migrations/runner.py +54 -0
- infrahub/core/migrations/schema/attribute_kind_update.py +9 -3
- infrahub/core/migrations/schema/attribute_supports_profile.py +90 -0
- infrahub/core/migrations/schema/node_attribute_add.py +26 -5
- infrahub/core/migrations/schema/node_attribute_remove.py +13 -109
- infrahub/core/migrations/schema/node_kind_update.py +2 -1
- infrahub/core/migrations/schema/node_remove.py +2 -1
- infrahub/core/migrations/schema/placeholder_dummy.py +3 -2
- infrahub/core/migrations/shared.py +66 -19
- infrahub/core/models.py +2 -2
- infrahub/core/node/__init__.py +207 -54
- infrahub/core/node/create.py +53 -49
- infrahub/core/node/lock_utils.py +124 -0
- infrahub/core/node/node_property_attribute.py +230 -0
- infrahub/core/node/resource_manager/ip_address_pool.py +2 -1
- infrahub/core/node/resource_manager/ip_prefix_pool.py +2 -1
- infrahub/core/node/resource_manager/number_pool.py +2 -1
- infrahub/core/node/standard.py +1 -1
- infrahub/core/property.py +11 -0
- infrahub/core/protocols.py +8 -1
- infrahub/core/query/attribute.py +82 -15
- infrahub/core/query/diff.py +61 -16
- infrahub/core/query/ipam.py +16 -4
- infrahub/core/query/node.py +92 -212
- infrahub/core/query/relationship.py +44 -26
- infrahub/core/query/subquery.py +0 -8
- infrahub/core/relationship/model.py +69 -24
- infrahub/core/schema/__init__.py +56 -0
- infrahub/core/schema/attribute_schema.py +4 -2
- infrahub/core/schema/basenode_schema.py +42 -2
- infrahub/core/schema/definitions/core/__init__.py +2 -0
- infrahub/core/schema/definitions/core/check.py +1 -1
- infrahub/core/schema/definitions/core/generator.py +2 -0
- infrahub/core/schema/definitions/core/group.py +16 -2
- infrahub/core/schema/definitions/core/repository.py +7 -0
- infrahub/core/schema/definitions/core/transform.py +1 -1
- infrahub/core/schema/definitions/internal.py +12 -3
- infrahub/core/schema/generated/attribute_schema.py +2 -2
- infrahub/core/schema/generated/base_node_schema.py +6 -1
- infrahub/core/schema/manager.py +3 -0
- infrahub/core/schema/node_schema.py +1 -0
- infrahub/core/schema/relationship_schema.py +0 -1
- infrahub/core/schema/schema_branch.py +295 -10
- infrahub/core/schema/schema_branch_display.py +135 -0
- infrahub/core/schema/schema_branch_hfid.py +120 -0
- infrahub/core/validators/aggregated_checker.py +1 -1
- infrahub/database/graph.py +21 -0
- infrahub/display_labels/__init__.py +0 -0
- infrahub/display_labels/gather.py +48 -0
- infrahub/display_labels/models.py +240 -0
- infrahub/display_labels/tasks.py +192 -0
- infrahub/display_labels/triggers.py +22 -0
- infrahub/events/branch_action.py +27 -1
- infrahub/events/group_action.py +1 -1
- infrahub/events/node_action.py +1 -1
- infrahub/generators/constants.py +7 -0
- infrahub/generators/models.py +38 -12
- infrahub/generators/tasks.py +34 -16
- infrahub/git/base.py +42 -2
- infrahub/git/integrator.py +22 -14
- infrahub/git/tasks.py +52 -2
- infrahub/graphql/analyzer.py +9 -0
- infrahub/graphql/api/dependencies.py +2 -4
- infrahub/graphql/api/endpoints.py +16 -6
- infrahub/graphql/app.py +2 -4
- infrahub/graphql/initialization.py +2 -3
- infrahub/graphql/manager.py +213 -137
- infrahub/graphql/middleware.py +12 -0
- infrahub/graphql/mutations/branch.py +16 -0
- infrahub/graphql/mutations/computed_attribute.py +110 -3
- infrahub/graphql/mutations/convert_object_type.py +44 -13
- infrahub/graphql/mutations/display_label.py +118 -0
- infrahub/graphql/mutations/generator.py +25 -7
- infrahub/graphql/mutations/hfid.py +125 -0
- infrahub/graphql/mutations/ipam.py +73 -41
- infrahub/graphql/mutations/main.py +61 -178
- infrahub/graphql/mutations/profile.py +195 -0
- infrahub/graphql/mutations/proposed_change.py +8 -1
- infrahub/graphql/mutations/relationship.py +2 -2
- infrahub/graphql/mutations/repository.py +22 -83
- infrahub/graphql/mutations/resource_manager.py +2 -2
- infrahub/graphql/mutations/webhook.py +1 -1
- infrahub/graphql/queries/resource_manager.py +1 -1
- infrahub/graphql/registry.py +173 -0
- infrahub/graphql/resolvers/resolver.py +2 -0
- infrahub/graphql/schema.py +8 -1
- infrahub/graphql/schema_sort.py +170 -0
- infrahub/graphql/types/branch.py +4 -1
- infrahub/graphql/types/enums.py +3 -0
- infrahub/groups/tasks.py +1 -1
- infrahub/hfid/__init__.py +0 -0
- infrahub/hfid/gather.py +48 -0
- infrahub/hfid/models.py +240 -0
- infrahub/hfid/tasks.py +191 -0
- infrahub/hfid/triggers.py +22 -0
- infrahub/lock.py +119 -42
- infrahub/locks/__init__.py +0 -0
- infrahub/locks/tasks.py +37 -0
- infrahub/message_bus/types.py +1 -0
- infrahub/patch/plan_writer.py +2 -2
- infrahub/permissions/constants.py +2 -0
- infrahub/profiles/__init__.py +0 -0
- infrahub/profiles/node_applier.py +101 -0
- infrahub/profiles/queries/__init__.py +0 -0
- infrahub/profiles/queries/get_profile_data.py +98 -0
- infrahub/profiles/tasks.py +63 -0
- infrahub/proposed_change/tasks.py +67 -14
- infrahub/repositories/__init__.py +0 -0
- infrahub/repositories/create_repository.py +113 -0
- infrahub/server.py +9 -1
- infrahub/services/__init__.py +8 -5
- infrahub/services/adapters/http/__init__.py +5 -0
- infrahub/services/adapters/workflow/worker.py +14 -3
- infrahub/task_manager/event.py +5 -0
- infrahub/task_manager/models.py +7 -0
- infrahub/task_manager/task.py +73 -0
- infrahub/tasks/registry.py +6 -4
- infrahub/trigger/catalogue.py +4 -0
- infrahub/trigger/models.py +2 -0
- infrahub/trigger/setup.py +13 -4
- infrahub/trigger/tasks.py +6 -0
- infrahub/webhook/models.py +1 -1
- infrahub/workers/dependencies.py +3 -1
- infrahub/workers/infrahub_async.py +10 -2
- infrahub/workflows/catalogue.py +118 -3
- infrahub/workflows/initialization.py +21 -0
- infrahub/workflows/models.py +17 -2
- infrahub/workflows/utils.py +2 -1
- infrahub_sdk/branch.py +17 -8
- infrahub_sdk/checks.py +1 -1
- infrahub_sdk/client.py +376 -95
- infrahub_sdk/config.py +29 -2
- infrahub_sdk/convert_object_type.py +61 -0
- infrahub_sdk/ctl/branch.py +3 -0
- infrahub_sdk/ctl/check.py +2 -3
- infrahub_sdk/ctl/cli_commands.py +20 -12
- infrahub_sdk/ctl/config.py +8 -2
- infrahub_sdk/ctl/generator.py +6 -3
- infrahub_sdk/ctl/graphql.py +184 -0
- infrahub_sdk/ctl/repository.py +39 -1
- infrahub_sdk/ctl/schema.py +40 -10
- infrahub_sdk/ctl/task.py +110 -0
- infrahub_sdk/ctl/utils.py +4 -0
- infrahub_sdk/ctl/validate.py +5 -3
- infrahub_sdk/diff.py +4 -5
- infrahub_sdk/exceptions.py +2 -0
- infrahub_sdk/generator.py +7 -1
- infrahub_sdk/graphql/__init__.py +12 -0
- infrahub_sdk/graphql/constants.py +1 -0
- infrahub_sdk/graphql/plugin.py +85 -0
- infrahub_sdk/graphql/query.py +77 -0
- infrahub_sdk/{graphql.py → graphql/renderers.py} +88 -75
- infrahub_sdk/graphql/utils.py +40 -0
- infrahub_sdk/node/attribute.py +2 -0
- infrahub_sdk/node/node.py +28 -20
- infrahub_sdk/node/relationship.py +1 -3
- infrahub_sdk/playback.py +1 -2
- infrahub_sdk/protocols.py +54 -6
- infrahub_sdk/pytest_plugin/plugin.py +7 -4
- infrahub_sdk/pytest_plugin/utils.py +40 -0
- infrahub_sdk/repository.py +1 -2
- infrahub_sdk/schema/__init__.py +70 -4
- infrahub_sdk/schema/main.py +1 -0
- infrahub_sdk/schema/repository.py +8 -0
- infrahub_sdk/spec/models.py +7 -0
- infrahub_sdk/spec/object.py +54 -6
- infrahub_sdk/spec/processors/__init__.py +0 -0
- infrahub_sdk/spec/processors/data_processor.py +10 -0
- infrahub_sdk/spec/processors/factory.py +34 -0
- infrahub_sdk/spec/processors/range_expand_processor.py +56 -0
- infrahub_sdk/spec/range_expansion.py +118 -0
- infrahub_sdk/task/models.py +6 -4
- infrahub_sdk/timestamp.py +18 -6
- infrahub_sdk/transforms.py +1 -1
- {infrahub_server-1.4.12.dist-info → infrahub_server-1.5.0.dist-info}/METADATA +9 -10
- {infrahub_server-1.4.12.dist-info → infrahub_server-1.5.0.dist-info}/RECORD +233 -176
- infrahub_testcontainers/container.py +114 -2
- infrahub_testcontainers/docker-compose-cluster.test.yml +5 -0
- infrahub_testcontainers/docker-compose.test.yml +5 -0
- infrahub_testcontainers/models.py +2 -2
- infrahub_testcontainers/performance_test.py +4 -4
- infrahub/core/convert_object_type/conversion.py +0 -134
- {infrahub_server-1.4.12.dist-info → infrahub_server-1.5.0.dist-info}/LICENSE.txt +0 -0
- {infrahub_server-1.4.12.dist-info → infrahub_server-1.5.0.dist-info}/WHEEL +0 -0
- {infrahub_server-1.4.12.dist-info → infrahub_server-1.5.0.dist-info}/entry_points.txt +0 -0
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
from infrahub.events.branch_action import BranchDeletedEvent
from infrahub.events.schema_action import SchemaUpdatedEvent
from infrahub.trigger.models import BuiltinTriggerDefinition, EventTrigger, ExecuteWorkflow
from infrahub.workflows.catalogue import HFID_SETUP

# Built-in trigger definition: run the HFID_SETUP workflow whenever a schema is
# updated or a branch is deleted.
TRIGGER_HFID_ALL_SCHEMA = BuiltinTriggerDefinition(
    name="hfid-setup-all",
    trigger=EventTrigger(events={SchemaUpdatedEvent.event_name, BranchDeletedEvent.event_name}),
    actions=[
        ExecuteWorkflow(
            workflow=HFID_SETUP,
            parameters={
                # Jinja templates resolved from the triggering event at run time.
                "branch_name": "{{ event.resource['infrahub.branch.name'] }}",
                "event_name": "{{ event.event }}",
                "context": {
                    # Render the Jinja template first, then parse the result as
                    # JSON so the workflow receives a structured context object.
                    "__prefect_kind": "json",
                    "value": {"__prefect_kind": "jinja", "template": "{{ event.payload['context'] | tojson }}"},
                },
            },
        ),
    ],
)
|
infrahub/lock.py
CHANGED
|
@@ -5,6 +5,7 @@ import time
|
|
|
5
5
|
import uuid
|
|
6
6
|
from asyncio import Lock as LocalLock
|
|
7
7
|
from asyncio import sleep
|
|
8
|
+
from contextvars import ContextVar
|
|
8
9
|
from typing import TYPE_CHECKING
|
|
9
10
|
|
|
10
11
|
import redis.asyncio as redis
|
|
@@ -25,6 +26,7 @@ registry: InfrahubLockRegistry = None
|
|
|
25
26
|
|
|
26
27
|
|
|
27
28
|
METRIC_PREFIX = "infrahub_lock"
|
|
29
|
+
LOCK_PREFIX = "lock"
|
|
28
30
|
|
|
29
31
|
LOCK_ACQUIRE_TIME_METRICS = Histogram(
|
|
30
32
|
f"{METRIC_PREFIX}_acquire_seconds",
|
|
@@ -49,9 +51,12 @@ GLOBAL_GRAPH_LOCK = "global.graph"
|
|
|
49
51
|
class InfrahubMultiLock:
|
|
50
52
|
"""Context manager to allow multiple locks to be reserved together"""
|
|
51
53
|
|
|
52
|
-
def __init__(
|
|
54
|
+
def __init__(
|
|
55
|
+
self, lock_registry: InfrahubLockRegistry, locks: list[str] | None = None, metrics: bool = True
|
|
56
|
+
) -> None:
|
|
53
57
|
self.registry = lock_registry
|
|
54
58
|
self.locks = locks or []
|
|
59
|
+
self.metrics = metrics
|
|
55
60
|
|
|
56
61
|
async def __aenter__(self):
|
|
57
62
|
await self.acquire()
|
|
@@ -66,11 +71,11 @@ class InfrahubMultiLock:
|
|
|
66
71
|
|
|
67
72
|
async def acquire(self) -> None:
|
|
68
73
|
for lock in self.locks:
|
|
69
|
-
await self.registry.get(name=lock).acquire()
|
|
74
|
+
await self.registry.get(name=lock, metrics=self.metrics).acquire()
|
|
70
75
|
|
|
71
76
|
async def release(self) -> None:
|
|
72
77
|
for lock in reversed(self.locks):
|
|
73
|
-
await self.registry.get(name=lock).release()
|
|
78
|
+
await self.registry.get(name=lock, metrics=self.metrics).release()
|
|
74
79
|
|
|
75
80
|
|
|
76
81
|
class NATSLock:
|
|
@@ -97,10 +102,10 @@ class NATSLock:
|
|
|
97
102
|
while True:
|
|
98
103
|
if await self.do_acquire(token):
|
|
99
104
|
self.token = token
|
|
100
|
-
return
|
|
105
|
+
return
|
|
101
106
|
await sleep(0.1) # default Redis GlobalLock value
|
|
102
107
|
|
|
103
|
-
async def do_acquire(self, token: str) -> bool:
|
|
108
|
+
async def do_acquire(self, token: str) -> bool | None:
|
|
104
109
|
return await self.service.cache.set(key=self.name, value=token, not_exists=True)
|
|
105
110
|
|
|
106
111
|
async def release(self) -> None:
|
|
@@ -122,16 +127,19 @@ class InfrahubLock:
|
|
|
122
127
|
connection: redis.Redis | InfrahubServices | None = None,
|
|
123
128
|
local: bool | None = None,
|
|
124
129
|
in_multi: bool = False,
|
|
130
|
+
metrics: bool = True,
|
|
125
131
|
) -> None:
|
|
126
|
-
self.use_local: bool = local
|
|
132
|
+
self.use_local: bool | None = local
|
|
127
133
|
self.local: LocalLock = None
|
|
128
134
|
self.remote: GlobalLock = None
|
|
129
135
|
self.name: str = name
|
|
130
136
|
self.connection: redis.Redis | None = connection
|
|
131
137
|
self.in_multi: bool = in_multi
|
|
132
138
|
self.lock_type: str = "multi" if self.in_multi else "individual"
|
|
133
|
-
self.
|
|
139
|
+
self._acquire_time: int | None = None
|
|
134
140
|
self.event = asyncio.Event()
|
|
141
|
+
self._recursion_var: ContextVar[int | None] = ContextVar(f"infrahub_lock_recursion_{self.name}", default=None)
|
|
142
|
+
self.metrics = metrics
|
|
135
143
|
|
|
136
144
|
if not self.connection or (self.use_local is None and name.startswith("local.")):
|
|
137
145
|
self.use_local = True
|
|
@@ -139,9 +147,20 @@ class InfrahubLock:
|
|
|
139
147
|
if self.use_local:
|
|
140
148
|
self.local = LocalLock()
|
|
141
149
|
elif config.SETTINGS.cache.driver == config.CacheDriver.Redis:
|
|
142
|
-
self.remote = GlobalLock(redis=self.connection, name=f"
|
|
150
|
+
self.remote = GlobalLock(redis=self.connection, name=f"{LOCK_PREFIX}.{self.name}")
|
|
143
151
|
else:
|
|
144
|
-
self.remote = NATSLock(service=self.connection, name=f"
|
|
152
|
+
self.remote = NATSLock(service=self.connection, name=f"{LOCK_PREFIX}.{self.name}")
|
|
153
|
+
|
|
154
|
+
@property
|
|
155
|
+
def acquire_time(self) -> int:
|
|
156
|
+
if self._acquire_time is not None:
|
|
157
|
+
return self._acquire_time
|
|
158
|
+
|
|
159
|
+
raise ValueError("The lock has not been initialized")
|
|
160
|
+
|
|
161
|
+
@acquire_time.setter
|
|
162
|
+
def acquire_time(self, value: int) -> None:
|
|
163
|
+
self._acquire_time = value
|
|
145
164
|
|
|
146
165
|
async def __aenter__(self):
|
|
147
166
|
await self.acquire()
|
|
@@ -155,21 +174,47 @@ class InfrahubLock:
|
|
|
155
174
|
await self.release()
|
|
156
175
|
|
|
157
176
|
async def acquire(self) -> None:
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
177
|
+
depth = self._recursion_var.get()
|
|
178
|
+
if depth is not None:
|
|
179
|
+
self._recursion_var.set(depth + 1)
|
|
180
|
+
return
|
|
181
|
+
|
|
182
|
+
if self.metrics:
|
|
183
|
+
with LOCK_ACQUIRE_TIME_METRICS.labels(self.name, self.lock_type).time():
|
|
184
|
+
if not self.use_local:
|
|
185
|
+
await self.remote.acquire(token=f"{current_timestamp()}::{WORKER_IDENTITY}")
|
|
186
|
+
else:
|
|
187
|
+
await self.local.acquire()
|
|
188
|
+
elif not self.use_local:
|
|
189
|
+
await self.remote.acquire(token=f"{current_timestamp()}::{WORKER_IDENTITY}")
|
|
190
|
+
else:
|
|
191
|
+
await self.local.acquire()
|
|
192
|
+
|
|
163
193
|
self.acquire_time = time.time_ns()
|
|
164
194
|
self.event.clear()
|
|
195
|
+
self._recursion_var.set(1)
|
|
165
196
|
|
|
166
197
|
async def release(self) -> None:
|
|
167
|
-
|
|
168
|
-
|
|
198
|
+
depth = self._recursion_var.get()
|
|
199
|
+
if depth is None:
|
|
200
|
+
raise RuntimeError("Lock release attempted without ownership context.")
|
|
201
|
+
|
|
202
|
+
if depth > 1:
|
|
203
|
+
self._recursion_var.set(depth - 1)
|
|
204
|
+
return
|
|
205
|
+
|
|
206
|
+
if self.acquire_time is not None:
|
|
207
|
+
duration_ns = time.time_ns() - self.acquire_time
|
|
208
|
+
if self.metrics:
|
|
209
|
+
LOCK_RESERVE_TIME_METRICS.labels(self.name, self.lock_type).observe(duration_ns / 1000000000)
|
|
210
|
+
self.acquire_time = None
|
|
211
|
+
|
|
169
212
|
if not self.use_local:
|
|
170
213
|
await self.remote.release()
|
|
171
214
|
else:
|
|
172
215
|
self.local.release()
|
|
216
|
+
|
|
217
|
+
self._recursion_var.set(None)
|
|
173
218
|
self.event.set()
|
|
174
219
|
|
|
175
220
|
async def locked(self) -> bool:
|
|
@@ -179,9 +224,54 @@ class InfrahubLock:
|
|
|
179
224
|
return self.local.locked()
|
|
180
225
|
|
|
181
226
|
|
|
227
|
+
class LockNameGenerator:
|
|
228
|
+
local = "local"
|
|
229
|
+
_global = "global"
|
|
230
|
+
|
|
231
|
+
def generate_name(self, name: str, namespace: str | None = None, local: bool | None = None) -> str:
|
|
232
|
+
if namespace is None and local is None:
|
|
233
|
+
return name
|
|
234
|
+
|
|
235
|
+
new_name = ""
|
|
236
|
+
if local is True:
|
|
237
|
+
new_name = f"{self.local}."
|
|
238
|
+
elif local is False:
|
|
239
|
+
new_name = f"{self._global}."
|
|
240
|
+
|
|
241
|
+
if namespace is not None:
|
|
242
|
+
new_name += f"{namespace}."
|
|
243
|
+
new_name += name
|
|
244
|
+
|
|
245
|
+
return new_name
|
|
246
|
+
|
|
247
|
+
def unpack_name(self, name: str) -> tuple[str, str | None, bool | None]:
|
|
248
|
+
local = None
|
|
249
|
+
namespace = None
|
|
250
|
+
|
|
251
|
+
parts = name.split(".")
|
|
252
|
+
if parts[0] == self.local:
|
|
253
|
+
local = True
|
|
254
|
+
parts = parts[1:]
|
|
255
|
+
elif parts[0] == self._global:
|
|
256
|
+
local = False
|
|
257
|
+
parts = parts[1:]
|
|
258
|
+
|
|
259
|
+
if len(parts) > 1:
|
|
260
|
+
namespace = parts[0]
|
|
261
|
+
original_name = ".".join(parts[1:])
|
|
262
|
+
else:
|
|
263
|
+
original_name = parts[0]
|
|
264
|
+
|
|
265
|
+
return original_name, namespace, local
|
|
266
|
+
|
|
267
|
+
|
|
182
268
|
class InfrahubLockRegistry:
|
|
183
269
|
def __init__(
|
|
184
|
-
self,
|
|
270
|
+
self,
|
|
271
|
+
token: str | None = None,
|
|
272
|
+
local_only: bool = False,
|
|
273
|
+
service: InfrahubServices | None = None,
|
|
274
|
+
name_generator: LockNameGenerator | None = None,
|
|
185
275
|
) -> None:
|
|
186
276
|
if config.SETTINGS.cache.enable and not local_only:
|
|
187
277
|
if config.SETTINGS.cache.driver == config.CacheDriver.Redis:
|
|
@@ -201,23 +291,7 @@ class InfrahubLockRegistry:
|
|
|
201
291
|
|
|
202
292
|
self.token = token or str(uuid.uuid4())
|
|
203
293
|
self.locks: dict[str, InfrahubLock] = {}
|
|
204
|
-
|
|
205
|
-
@classmethod
|
|
206
|
-
def _generate_name(cls, name: str, namespace: str | None = None, local: bool | None = None) -> str:
|
|
207
|
-
if namespace is None and local is None:
|
|
208
|
-
return name
|
|
209
|
-
|
|
210
|
-
new_name = ""
|
|
211
|
-
if local is True:
|
|
212
|
-
new_name = "local."
|
|
213
|
-
elif local is False:
|
|
214
|
-
new_name = "global."
|
|
215
|
-
|
|
216
|
-
if namespace is not None:
|
|
217
|
-
new_name += f"{namespace}."
|
|
218
|
-
new_name += name
|
|
219
|
-
|
|
220
|
-
return new_name
|
|
294
|
+
self.name_generator = name_generator or LockNameGenerator()
|
|
221
295
|
|
|
222
296
|
def get_existing(
|
|
223
297
|
self,
|
|
@@ -225,17 +299,24 @@ class InfrahubLockRegistry:
|
|
|
225
299
|
namespace: str | None,
|
|
226
300
|
local: bool | None = None,
|
|
227
301
|
) -> InfrahubLock | None:
|
|
228
|
-
lock_name = self.
|
|
302
|
+
lock_name = self.name_generator.generate_name(name=name, namespace=namespace, local=local)
|
|
229
303
|
if lock_name not in self.locks:
|
|
230
304
|
return None
|
|
231
305
|
return self.locks[lock_name]
|
|
232
306
|
|
|
233
307
|
def get(
|
|
234
|
-
self,
|
|
308
|
+
self,
|
|
309
|
+
name: str,
|
|
310
|
+
namespace: str | None = None,
|
|
311
|
+
local: bool | None = None,
|
|
312
|
+
in_multi: bool = False,
|
|
313
|
+
metrics: bool = True,
|
|
235
314
|
) -> InfrahubLock:
|
|
236
|
-
lock_name = self.
|
|
315
|
+
lock_name = self.name_generator.generate_name(name=name, namespace=namespace, local=local)
|
|
237
316
|
if lock_name not in self.locks:
|
|
238
|
-
self.locks[lock_name] = InfrahubLock(
|
|
317
|
+
self.locks[lock_name] = InfrahubLock(
|
|
318
|
+
name=lock_name, connection=self.connection, in_multi=in_multi, metrics=metrics
|
|
319
|
+
)
|
|
239
320
|
return self.locks[lock_name]
|
|
240
321
|
|
|
241
322
|
def local_schema_lock(self) -> LocalLock:
|
|
@@ -257,7 +338,3 @@ class InfrahubLockRegistry:
|
|
|
257
338
|
def initialize_lock(local_only: bool = False, service: InfrahubServices | None = None) -> None:
|
|
258
339
|
global registry
|
|
259
340
|
registry = InfrahubLockRegistry(local_only=local_only, service=service)
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
def build_object_lock_name(name: str) -> str:
|
|
263
|
-
return f"global.object.{name}"
|
|
File without changes
|
infrahub/locks/tasks.py
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from prefect import flow
|
|
4
|
+
from prefect.logging import get_run_logger
|
|
5
|
+
|
|
6
|
+
from infrahub import config
|
|
7
|
+
from infrahub.core.registry import registry
|
|
8
|
+
from infrahub.core.timestamp import Timestamp
|
|
9
|
+
from infrahub.lock import LOCK_PREFIX
|
|
10
|
+
from infrahub.services import InfrahubServices # noqa: TC001 needed for prefect flow
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
@flow(
    name="clean-up-deadlocks",
    flow_run_name="Clean up deadlocks",
)
async def clean_up_deadlocks(service: InfrahubServices) -> None:
    """Remove stale distributed locks left behind by inactive workers"""
    lock_keys = await service.cache.list_keys(filter_pattern=f"{LOCK_PREFIX}*")
    if not lock_keys:
        return

    logger = get_run_logger()
    lock_values = await service.cache.get_values(keys=lock_keys)
    known_workers = await service.component.list_workers(branch=registry.default_branch, schema_hash=False)
    active_worker_ids = {worker.id for worker in known_workers if worker.active}
    # Locks older than this many minutes and owned by an inactive worker are stale.
    expiry_mins = config.SETTINGS.cache.clean_up_deadlocks_interval_mins

    for key, value in zip(lock_keys, lock_values, strict=False):
        if not key or not value:
            continue

        # Lock values are written as "<timestamp>::<worker identity>".
        created_at, worker_id = value.split("::", 1)
        if worker_id not in active_worker_ids and Timestamp() > Timestamp(created_at).add(minutes=expiry_mins):
            await service.cache.delete(key)
            logger.info(f"Deleted deadlock key={key} worker={worker_id}")
|
infrahub/message_bus/types.py
CHANGED
|
@@ -92,6 +92,7 @@ class ProposedChangeArtifactDefinition(BaseModel):
|
|
|
92
92
|
query_name: str # Deprecated
|
|
93
93
|
query_id: str
|
|
94
94
|
query_models: list[str]
|
|
95
|
+
query_payload: str = Field(..., description="GraphQL query")
|
|
95
96
|
repository_id: str
|
|
96
97
|
transform_kind: str
|
|
97
98
|
template_path: str = Field(default="")
|
infrahub/patch/plan_writer.py
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
import json
|
|
2
2
|
from dataclasses import asdict
|
|
3
|
-
from datetime import
|
|
3
|
+
from datetime import UTC, datetime
|
|
4
4
|
from pathlib import Path
|
|
5
5
|
from typing import Any
|
|
6
6
|
|
|
@@ -10,7 +10,7 @@ from .models import EdgeToAdd, EdgeToDelete, EdgeToUpdate, PatchPlan, VertexToAd
|
|
|
10
10
|
|
|
11
11
|
class PatchPlanWriter:
|
|
12
12
|
def write(self, patches_directory: Path, patch_plan: PatchPlan) -> Path:
|
|
13
|
-
timestamp_str = datetime.now(tz=
|
|
13
|
+
timestamp_str = datetime.now(tz=UTC).strftime("%Y%m%d-%H%M%S")
|
|
14
14
|
patch_name = f"patch-{patch_plan.name}-{timestamp_str}"
|
|
15
15
|
patch_plan_directory = patches_directory / Path(patch_name)
|
|
16
16
|
if not patch_plan_directory.exists():
|
|
@@ -30,6 +30,7 @@ GLOBAL_PERMISSION_DENIAL_MESSAGE = {
|
|
|
30
30
|
GlobalPermissions.MANAGE_ACCOUNTS.value: "You are not allowed to manage user accounts, groups or roles",
|
|
31
31
|
GlobalPermissions.MANAGE_PERMISSIONS.value: "You are not allowed to manage permissions",
|
|
32
32
|
GlobalPermissions.MANAGE_REPOSITORIES.value: "You are not allowed to manage repositories",
|
|
33
|
+
GlobalPermissions.UPDATE_OBJECT_HFID_DISPLAY_LABEL.value: "You are not allowed to update human friendly IDs and display labels ad hoc",
|
|
33
34
|
}
|
|
34
35
|
|
|
35
36
|
GLOBAL_PERMISSION_DESCRIPTION = {
|
|
@@ -42,4 +43,5 @@ GLOBAL_PERMISSION_DESCRIPTION = {
|
|
|
42
43
|
GlobalPermissions.MANAGE_PERMISSIONS: "Allow a user to manage permissions",
|
|
43
44
|
GlobalPermissions.MANAGE_REPOSITORIES: "Allow a user to manage repositories",
|
|
44
45
|
GlobalPermissions.SUPER_ADMIN: "Allow a user to do anything",
|
|
46
|
+
GlobalPermissions.UPDATE_OBJECT_HFID_DISPLAY_LABEL: "Allow a user to update objects' display labels and human friendly IDs ad hoc",
|
|
45
47
|
}
|
|
File without changes
|
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
from typing import Any
|
|
2
|
+
|
|
3
|
+
from infrahub.core.attribute import BaseAttribute
|
|
4
|
+
from infrahub.core.branch import Branch
|
|
5
|
+
from infrahub.core.node import Node
|
|
6
|
+
from infrahub.database import InfrahubDatabase
|
|
7
|
+
|
|
8
|
+
from .queries.get_profile_data import GetProfileDataQuery, ProfileData
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class NodeProfilesApplier:
    """Apply or remove profile-provided attribute values on a node.

    Profiles linked to a node via its "profiles" relationship can supply
    values for attributes that are otherwise at their default. The highest
    priority profile (lowest priority number, ties broken by UUID) wins.
    """

    def __init__(self, db: InfrahubDatabase, branch: Branch):
        self.db = db
        self.branch = branch

    async def _get_profile_ids(self, node: Node) -> list[str]:
        """Return the peer IDs of the node's "profiles" relationship, or [] if absent."""
        try:
            profiles_rel = node.get_relationship("profiles")
        except ValueError:
            # The node's schema has no "profiles" relationship.
            return []
        profile_rels = await profiles_rel.get_relationships(db=self.db)
        return [pr.peer_id for pr in profile_rels if pr.peer_id]

    async def _get_attr_names_for_profiles(self, node: Node) -> list[str]:
        """Return names of the node's attributes that profiles may override.

        Only attributes currently sourced from a profile or still at their
        default value are eligible; explicitly-set values are never overridden.
        """
        node_schema = node.get_schema()

        # get the names of attributes that could be affected by profile changes
        attr_names_for_profiles: list[str] = []
        for attr_schema in node_schema.attributes:
            attr_name = attr_schema.name
            node_attr: BaseAttribute = getattr(node, attr_name)
            if node_attr.is_from_profile or node_attr.is_default:
                attr_names_for_profiles.append(attr_name)
        return attr_names_for_profiles

    async def _get_sorted_profile_data(
        self, profile_ids: list[str], attr_names_for_profiles: list[str]
    ) -> list[ProfileData]:
        """Fetch profile priorities/values and return them sorted by (priority, uuid)."""
        if not profile_ids:
            return []
        query = await GetProfileDataQuery.init(
            db=self.db, branch=self.branch, profile_ids=profile_ids, attr_names=attr_names_for_profiles
        )
        await query.execute(db=self.db)
        profile_data_list = query.get_profile_data()
        # Sort by priority first; UUID makes the ordering deterministic on ties.
        return sorted(profile_data_list, key=lambda x: (x.priority, x.uuid))

    def _apply_profile_to_attribute(self, node_attr: BaseAttribute, profile_value: Any, profile_id: str) -> bool:
        """Set the profile value/flags/source on one attribute.

        Returns True when anything on the attribute actually changed.
        """
        is_changed = False
        if node_attr.value != profile_value:
            node_attr.value = profile_value
            is_changed = True
        if node_attr.is_default is not False:
            node_attr.is_default = False
            is_changed = True
        if node_attr.is_from_profile is not True:
            node_attr.is_from_profile = True
            is_changed = True
        if node_attr.source_id != profile_id:  # type: ignore[attr-defined]
            node_attr.set_source(value=profile_id)
            is_changed = True
        return is_changed

    def _remove_profile_from_attribute(self, node_attr: BaseAttribute) -> None:
        """Revert an attribute to its schema default and clear the profile source."""
        node_attr.clear_source()
        node_attr.value = node_attr.schema.default_value
        node_attr.is_default = True
        node_attr.is_from_profile = False

    async def apply_profiles(self, node: Node) -> list[str]:
        """Apply the node's profiles to its eligible attributes.

        Returns the names of the attributes that were changed (updated from a
        profile, or reverted to default because no profile provides a value).
        """
        profile_ids = await self._get_profile_ids(node=node)
        attr_names_for_profiles = await self._get_attr_names_for_profiles(node=node)

        if not attr_names_for_profiles:
            return []

        # get profiles priorities and attribute values on branch
        sorted_profile_data = await self._get_sorted_profile_data(
            profile_ids=profile_ids, attr_names_for_profiles=attr_names_for_profiles
        )

        updated_field_names = []
        # set attribute values/is_default/is_from_profile on nodes
        for attr_name in attr_names_for_profiles:
            has_profile_data = False
            node_attr = node.get_attribute(attr_name)
            for profile_data in sorted_profile_data:
                profile_value = profile_data.attribute_values.get(attr_name)
                if profile_value is not None:
                    has_profile_data = True
                    # NOTE: dead `is_changed = False` pre-assignment removed; the
                    # call below is the sole source of the flag.
                    is_changed = self._apply_profile_to_attribute(
                        node_attr=node_attr, profile_value=profile_value, profile_id=profile_data.uuid
                    )
                    if is_changed:
                        updated_field_names.append(attr_name)
                    # First profile (highest priority) providing a value wins.
                    break
            if not has_profile_data and node_attr.is_from_profile:
                self._remove_profile_from_attribute(node_attr=node_attr)
                updated_field_names.append(attr_name)
        return updated_field_names
|
|
File without changes
|
|
@@ -0,0 +1,98 @@
|
|
|
1
|
+
from dataclasses import dataclass
|
|
2
|
+
from typing import Any
|
|
3
|
+
|
|
4
|
+
from infrahub.core.constants import NULL_VALUE
|
|
5
|
+
from infrahub.core.query import Query, QueryType
|
|
6
|
+
from infrahub.database import InfrahubDatabase
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
@dataclass
|
|
10
|
+
class ProfileData:
|
|
11
|
+
uuid: str
|
|
12
|
+
priority: float | int
|
|
13
|
+
attribute_values: dict[str, Any]
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class GetProfileDataQuery(Query):
    """Read the priorities and attribute values of a set of profile nodes.

    For each requested profile UUID, the query returns one row per attribute
    (restricted to ``attr_names`` plus the implicit ``profile_priority``),
    considering only graph elements that are active on the current branch at
    the query's timestamp.
    """

    type: QueryType = QueryType.READ
    insert_return: bool = False

    def __init__(self, *args: Any, profile_ids: list[str], attr_names: list[str], **kwargs: Any):
        """Store the profile UUIDs and attribute names to fetch.

        Args:
            profile_ids: UUIDs of the profile nodes to read.
            attr_names: Attribute names to fetch; ``profile_priority`` is
                always added on top of this list at query time.
        """
        super().__init__(*args, **kwargs)
        self.profile_ids = profile_ids
        self.attr_names = attr_names

    async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None:  # noqa: ARG002
        # Branch/time filter shared by every relationship match below; the
        # placeholders it needs are merged into the query parameters.
        branch_filter, branch_params = self.branch.get_query_filter_path(at=self.at.to_string())
        self.params.update(branch_params)
        self.params["profile_ids"] = self.profile_ids
        # profile_priority is always fetched so get_profile_data can populate
        # ProfileData.priority alongside the regular attribute values.
        self.params["attr_names"] = self.attr_names + ["profile_priority"]

        # NOTE(review): the first two CALL subqueries ORDER BY the edge but have
        # no LIMIT 1 (the HAS_VALUE subquery does), and ``is_active`` is used in
        # a WHERE after a WITH that does not project it — confirm this behaves
        # as intended on the supported Neo4j/Memgraph versions.
        query = """
        // --------------
        // get the Profile nodes
        // --------------
        MATCH (profile:Node)
        WHERE profile.uuid IN $profile_ids
        // --------------
        // make sure we only use the active ones
        // --------------
        CALL (profile) {
            MATCH (profile)-[r:IS_PART_OF]->(:Root)
            WHERE %(branch_filter)s
            ORDER BY r.branch_level DESC, r.from DESC, r.status ASC
            RETURN r.status = "active" AS is_active
        }
        WITH profile
        WHERE is_active = TRUE
        // --------------
        // get the attributes that we care about
        // --------------
        MATCH (profile)-[:HAS_ATTRIBUTE]-(attr:Attribute)
        WHERE attr.name IN $attr_names
        WITH DISTINCT profile, attr
        CALL (profile, attr) {
            MATCH (profile)-[r:HAS_ATTRIBUTE]->(attr)
            WHERE %(branch_filter)s
            ORDER BY r.branch_level DESC, r.from DESC, r.status ASC
            RETURN r.status = "active" AS is_active
        }
        WITH profile, attr
        WHERE is_active = TRUE
        // --------------
        // get the attribute values
        // --------------
        CALL (attr) {
            MATCH (attr)-[r:HAS_VALUE]->(av)
            WHERE %(branch_filter)s
            ORDER BY r.branch_level DESC, r.from DESC, r.status ASC
            RETURN av, r.status = "active" AS is_active
            LIMIT 1
        }
        WITH profile, attr, av
        WHERE is_active = TRUE
        RETURN profile.uuid AS profile_uuid, attr.name AS attr_name, av.value AS attr_value
        """ % {"branch_filter": branch_filter}
        self.add_to_query(query)
        self.return_labels = ["profile_uuid", "attr_name", "attr_value"]

    def get_profile_data(self) -> list[ProfileData]:
        """Group the raw (profile_uuid, attr_name, attr_value) rows by profile.

        Returns:
            One ``ProfileData`` per distinct profile UUID seen in the results,
            with ``profile_priority`` routed to ``priority`` and every other
            attribute stored in ``attribute_values``.
        """
        profile_data_by_uuid: dict[str, ProfileData] = {}
        for result in self.results:
            profile_uuid = result.get_as_type(label="profile_uuid", return_type=str)
            if profile_uuid not in profile_data_by_uuid:
                # priority defaults to +inf until an explicit profile_priority
                # row is seen for this profile.
                profile_data_by_uuid[profile_uuid] = ProfileData(
                    uuid=profile_uuid, priority=float("inf"), attribute_values={}
                )
            profile_data = profile_data_by_uuid[profile_uuid]
            attr_name = result.get_as_type(label="attr_name", return_type=str)
            attr_value: Any = result.get(label="attr_value")
            # The graph stores an explicit NULL sentinel; normalize it to None.
            if attr_value == NULL_VALUE:
                attr_value = None
            if attr_name == "profile_priority":
                # Priorities may come back as non-int (e.g. string/float);
                # coerce to int when a value is present.
                if attr_value is not None and not isinstance(attr_value, int):
                    attr_value = int(attr_value)
                # NOTE(review): a NULL priority overwrites the float("inf")
                # default with None here — confirm downstream sorting accepts
                # a None priority.
                profile_data.priority = attr_value
            else:
                profile_data.attribute_values[attr_name] = attr_value
        return list(profile_data_by_uuid.values())
|
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from prefect import flow
|
|
4
|
+
from prefect.logging import get_run_logger
|
|
5
|
+
|
|
6
|
+
from infrahub.workers.dependencies import get_client, get_workflow
|
|
7
|
+
from infrahub.workflows.catalogue import PROFILE_REFRESH
|
|
8
|
+
from infrahub.workflows.utils import add_tags
|
|
9
|
+
|
|
10
|
+
REFRESH_PROFILES_MUTATION = """
|
|
11
|
+
mutation RefreshProfiles(
|
|
12
|
+
$id: String!,
|
|
13
|
+
) {
|
|
14
|
+
InfrahubProfilesRefresh(
|
|
15
|
+
data: {id: $id}
|
|
16
|
+
) {
|
|
17
|
+
ok
|
|
18
|
+
}
|
|
19
|
+
}
|
|
20
|
+
"""
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
@flow(
    name="object-profiles-refresh",
    flow_run_name="Refresh profiles for {node_id}",
)
async def object_profiles_refresh(
    branch_name: str,
    node_id: str,
) -> None:
    """Refresh the profile-derived attributes of one node via the GraphQL API.

    Args:
        branch_name: Branch on which the refresh mutation is executed.
        node_id: Id of the node whose profiles should be re-applied.
    """
    logger = get_run_logger()

    # Tag the flow run with the branch and node it touches before doing work.
    await add_tags(branches=[branch_name], nodes=[node_id], db_change=True)

    graphql_client = get_client()
    await graphql_client.execute_graphql(
        query=REFRESH_PROFILES_MUTATION,
        variables={"id": node_id},
        branch_name=branch_name,
    )
    logger.info(f"Profiles refreshed for {node_id}")
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
@flow(
    name="objects-profiles-refresh-multiple",
    flow_run_name="Refresh profiles for multiple objects",
)
async def objects_profiles_refresh_multiple(
    branch_name: str,
    node_ids: list[str],
) -> None:
    """Fan out one profile-refresh workflow per node on the given branch.

    Args:
        branch_name: Branch on which each refresh workflow runs.
        node_ids: Ids of the nodes whose profiles should be re-applied.
    """
    logger = get_run_logger()

    # Only the branch is tagged here; per-node tagging happens in each
    # submitted workflow run.
    await add_tags(branches=[branch_name])

    for node_id in node_ids:
        logger.info(f"Requesting profile refresh for {node_id}")
        workflow_parameters = {
            "branch_name": branch_name,
            "node_id": node_id,
        }
        await get_workflow().submit_workflow(
            workflow=PROFILE_REFRESH,
            parameters=workflow_parameters,
        )
|