infrahub-server 1.5.0b1__py3-none-any.whl → 1.5.0b2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- infrahub/api/internal.py +2 -0
- infrahub/api/oauth2.py +13 -19
- infrahub/api/oidc.py +15 -21
- infrahub/api/schema.py +24 -3
- infrahub/artifacts/models.py +2 -1
- infrahub/auth.py +137 -3
- infrahub/cli/__init__.py +2 -0
- infrahub/cli/db.py +83 -102
- infrahub/cli/dev.py +118 -0
- infrahub/cli/tasks.py +46 -0
- infrahub/cli/upgrade.py +30 -3
- infrahub/computed_attribute/tasks.py +20 -8
- infrahub/core/attribute.py +10 -2
- infrahub/core/branch/enums.py +1 -1
- infrahub/core/branch/models.py +7 -3
- infrahub/core/branch/tasks.py +68 -7
- infrahub/core/constants/__init__.py +3 -0
- infrahub/core/diff/query/artifact.py +1 -0
- infrahub/core/diff/query/field_summary.py +1 -0
- infrahub/core/graph/__init__.py +1 -1
- infrahub/core/initialization.py +5 -2
- infrahub/core/migrations/__init__.py +3 -0
- infrahub/core/migrations/exceptions.py +4 -0
- infrahub/core/migrations/graph/__init__.py +10 -13
- infrahub/core/migrations/graph/load_schema_branch.py +21 -0
- infrahub/core/migrations/graph/m013_convert_git_password_credential.py +1 -1
- infrahub/core/migrations/graph/m040_duplicated_attributes.py +81 -0
- infrahub/core/migrations/graph/m041_profile_attrs_in_db.py +145 -0
- infrahub/core/migrations/graph/m042_create_hfid_display_label_in_db.py +164 -0
- infrahub/core/migrations/graph/m043_backfill_hfid_display_label_in_db.py +866 -0
- infrahub/core/migrations/query/__init__.py +7 -8
- infrahub/core/migrations/query/attribute_add.py +8 -6
- infrahub/core/migrations/query/attribute_remove.py +134 -0
- infrahub/core/migrations/runner.py +54 -0
- infrahub/core/migrations/schema/attribute_kind_update.py +9 -3
- infrahub/core/migrations/schema/attribute_supports_profile.py +90 -0
- infrahub/core/migrations/schema/node_attribute_add.py +30 -2
- infrahub/core/migrations/schema/node_attribute_remove.py +13 -109
- infrahub/core/migrations/schema/node_kind_update.py +2 -1
- infrahub/core/migrations/schema/node_remove.py +2 -1
- infrahub/core/migrations/schema/placeholder_dummy.py +3 -2
- infrahub/core/migrations/shared.py +48 -14
- infrahub/core/node/__init__.py +16 -11
- infrahub/core/node/create.py +46 -63
- infrahub/core/node/lock_utils.py +70 -44
- infrahub/core/node/resource_manager/ip_address_pool.py +2 -1
- infrahub/core/node/resource_manager/ip_prefix_pool.py +2 -1
- infrahub/core/node/resource_manager/number_pool.py +2 -1
- infrahub/core/query/attribute.py +55 -0
- infrahub/core/query/ipam.py +1 -0
- infrahub/core/query/node.py +9 -3
- infrahub/core/query/relationship.py +1 -0
- infrahub/core/schema/__init__.py +56 -0
- infrahub/core/schema/attribute_schema.py +4 -0
- infrahub/core/schema/definitions/internal.py +2 -2
- infrahub/core/schema/generated/attribute_schema.py +2 -2
- infrahub/core/schema/manager.py +22 -1
- infrahub/core/schema/schema_branch.py +180 -22
- infrahub/database/graph.py +21 -0
- infrahub/display_labels/tasks.py +13 -7
- infrahub/events/branch_action.py +27 -1
- infrahub/generators/tasks.py +3 -7
- infrahub/git/base.py +4 -1
- infrahub/git/integrator.py +1 -1
- infrahub/git/models.py +2 -1
- infrahub/git/repository.py +22 -5
- infrahub/git/tasks.py +66 -10
- infrahub/git/utils.py +123 -1
- infrahub/graphql/api/endpoints.py +14 -4
- infrahub/graphql/manager.py +4 -9
- infrahub/graphql/mutations/convert_object_type.py +11 -1
- infrahub/graphql/mutations/display_label.py +17 -10
- infrahub/graphql/mutations/hfid.py +17 -10
- infrahub/graphql/mutations/ipam.py +54 -35
- infrahub/graphql/mutations/main.py +27 -28
- infrahub/graphql/schema_sort.py +170 -0
- infrahub/graphql/types/branch.py +4 -1
- infrahub/graphql/types/enums.py +3 -0
- infrahub/hfid/tasks.py +13 -7
- infrahub/lock.py +52 -12
- infrahub/message_bus/types.py +2 -1
- infrahub/permissions/constants.py +2 -0
- infrahub/proposed_change/tasks.py +25 -16
- infrahub/server.py +6 -2
- infrahub/services/__init__.py +2 -2
- infrahub/services/adapters/http/__init__.py +5 -0
- infrahub/services/adapters/workflow/worker.py +14 -3
- infrahub/task_manager/event.py +5 -0
- infrahub/task_manager/models.py +7 -0
- infrahub/task_manager/task.py +73 -0
- infrahub/trigger/setup.py +13 -4
- infrahub/trigger/tasks.py +3 -0
- infrahub/workers/dependencies.py +10 -1
- infrahub/workers/infrahub_async.py +10 -2
- infrahub/workflows/catalogue.py +8 -0
- infrahub/workflows/initialization.py +5 -0
- infrahub/workflows/utils.py +2 -1
- infrahub_sdk/client.py +13 -10
- infrahub_sdk/config.py +29 -2
- infrahub_sdk/ctl/schema.py +22 -7
- infrahub_sdk/schema/__init__.py +32 -4
- infrahub_sdk/spec/models.py +7 -0
- infrahub_sdk/spec/object.py +37 -102
- infrahub_sdk/spec/processors/__init__.py +0 -0
- infrahub_sdk/spec/processors/data_processor.py +10 -0
- infrahub_sdk/spec/processors/factory.py +34 -0
- infrahub_sdk/spec/processors/range_expand_processor.py +56 -0
- {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.0b2.dist-info}/METADATA +3 -1
- {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.0b2.dist-info}/RECORD +115 -101
- infrahub_testcontainers/container.py +114 -2
- infrahub_testcontainers/docker-compose-cluster.test.yml +5 -0
- infrahub_testcontainers/docker-compose.test.yml +5 -0
- infrahub/core/migrations/graph/m040_profile_attrs_in_db.py +0 -166
- infrahub/core/migrations/graph/m041_create_hfid_display_label_in_db.py +0 -97
- infrahub/core/migrations/graph/m042_backfill_hfid_display_label_in_db.py +0 -86
- {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.0b2.dist-info}/LICENSE.txt +0 -0
- {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.0b2.dist-info}/WHEEL +0 -0
- {infrahub_server-1.5.0b1.dist-info → infrahub_server-1.5.0b2.dist-info}/entry_points.txt +0 -0
infrahub/graphql/mutations/ipam.py
CHANGED

@@ -20,7 +20,7 @@ from infrahub.lock import InfrahubMultiLock
 from infrahub.log import get_logger

 from ...core.node.create import create_node
-from ...core.node.lock_utils import build_object_lock_name
+from ...core.node.lock_utils import build_object_lock_name, get_lock_names_on_object_mutation
 from .main import DeleteResult, InfrahubMutationMixin, InfrahubMutationOptions, build_graphql_response
 from .node_getter.by_default_filter import MutationNodeGetterByDefaultFilter

@@ -108,11 +108,11 @@ class InfrahubIPAddressMutation(InfrahubMutationMixin, Mutation):
         super().__init_subclass_with_meta__(_meta=_meta, **options)

     @staticmethod
-    def
+    def _get_lock_names(namespace_id: str, branch: Branch) -> list[str]:
         if not branch.is_default:
             # Do not lock on other branches as reconciliation will be performed at least when merging in main branch.
-            return
-        return build_object_lock_name(InfrahubKind.IPADDRESS + "_" + namespace_id)
+            return []
+        return [build_object_lock_name(InfrahubKind.IPADDRESS + "_" + namespace_id)]

     @classmethod
     async def _mutate_create_object_and_reconcile(
@@ -150,17 +150,13 @@ class InfrahubIPAddressMutation(InfrahubMutationMixin, Mutation):
         ip_address = ipaddress.ip_interface(data["address"]["value"])
         namespace_id = await validate_namespace(db=db, branch=branch, data=data)

-
-
-
-            reconciled_address = await cls._mutate_create_object_and_reconcile(
-                data=data, branch=branch, db=dbt, ip_address=ip_address, namespace_id=namespace_id
-            )
-        else:
+        lock_names = cls._get_lock_names(namespace_id, branch)
+        async with InfrahubMultiLock(lock_registry=lock.registry, locks=lock_names):
+            async with db.start_transaction() as dbt:
                 reconciled_address = await cls._mutate_create_object_and_reconcile(
                     data=data, branch=branch, db=dbt, ip_address=ip_address, namespace_id=namespace_id
                 )
-
+                graphql_response = await build_graphql_response(info=info, db=dbt, obj=reconciled_address)

         return reconciled_address, cls(**graphql_response)

@@ -206,18 +202,28 @@ class InfrahubIPAddressMutation(InfrahubMutationMixin, Mutation):
         namespace = await address.ip_namespace.get_peer(db)
         namespace_id = await validate_namespace(db=db, branch=branch, data=data, existing_namespace_id=namespace.id)

-
-
-
+        # Prepare a clone to compute locks without triggering pool allocations
+        preview_obj = await NodeManager.get_one_by_id_or_default_filter(
+            db=db,
+            kind=address.get_kind(),
+            id=address.get_id(),
+            branch=branch,
+        )
+        await preview_obj.from_graphql(db=db, data=data, process_pools=False)
+
+        schema_branch = db.schema.get_schema_branch(name=branch.name)
+        lock_names = get_lock_names_on_object_mutation(node=preview_obj, schema_branch=schema_branch)
+
+        namespace_lock_names = cls._get_lock_names(namespace_id, branch)
+        async with InfrahubMultiLock(lock_registry=lock.registry, locks=namespace_lock_names):
+            # FIXME: do not lock when data does not contain uniqueness constraint fields or resource pool allocations
+            async with InfrahubMultiLock(lock_registry=lock.registry, locks=lock_names, metrics=False):
+                async with db.start_transaction() as dbt:
                     reconciled_address = await cls._mutate_update_object_and_reconcile(
                         info=info, data=data, branch=branch, address=address, namespace_id=namespace_id, db=dbt
                     )
-        else:
-            reconciled_address = await cls._mutate_update_object_and_reconcile(
-                info=info, data=data, branch=branch, address=address, namespace_id=namespace_id, db=dbt
-            )

-
+        result = await cls.mutate_update_to_graphql(db=dbt, info=info, obj=reconciled_address)

         return address, result

@@ -269,11 +275,11 @@ class InfrahubIPPrefixMutation(InfrahubMutationMixin, Mutation):
         super().__init_subclass_with_meta__(_meta=_meta, **options)

     @staticmethod
-    def
+    def _get_lock_names(namespace_id: str) -> list[str]:
         # IPPrefix has some cardinality-one relationships involved (parent/child/ip_address),
         # so we need to lock on any branch to avoid creating multiple peers for these relationships
         # during concurrent ipam reconciliations.
-        return build_object_lock_name(InfrahubKind.IPPREFIX + "_" + namespace_id)
+        return [build_object_lock_name(InfrahubKind.IPPREFIX + "_" + namespace_id)]

     @classmethod
     async def _mutate_create_object_and_reconcile(
@@ -306,9 +312,9 @@ class InfrahubIPPrefixMutation(InfrahubMutationMixin, Mutation):
         db = database or graphql_context.db
         namespace_id = await validate_namespace(db=db, branch=branch, data=data)

-
-
-        async with
+        lock_names = cls._get_lock_names(namespace_id)
+        async with InfrahubMultiLock(lock_registry=lock.registry, locks=lock_names):
+            async with db.start_transaction() as dbt:
                 reconciled_prefix = await cls._mutate_create_object_and_reconcile(
                     data=data, branch=branch, db=dbt, namespace_id=namespace_id
                 )
@@ -356,13 +362,26 @@ class InfrahubIPPrefixMutation(InfrahubMutationMixin, Mutation):
         namespace = await prefix.ip_namespace.get_peer(db)
         namespace_id = await validate_namespace(db=db, branch=branch, data=data, existing_namespace_id=namespace.id)

-
-
-
-
-
-
-
+        # Prepare a clone to compute locks without triggering pool allocations
+        preview_obj = await NodeManager.get_one_by_id_or_default_filter(
+            db=db,
+            kind=prefix.get_kind(),
+            id=prefix.get_id(),
+            branch=branch,
+        )
+        await preview_obj.from_graphql(db=db, data=data, process_pools=False)
+
+        schema_branch = db.schema.get_schema_branch(name=branch.name)
+        lock_names = get_lock_names_on_object_mutation(node=preview_obj, schema_branch=schema_branch)
+
+        namespace_lock_names = cls._get_lock_names(namespace_id)
+        async with InfrahubMultiLock(lock_registry=lock.registry, locks=namespace_lock_names):
+            async with InfrahubMultiLock(lock_registry=lock.registry, locks=lock_names, metrics=False):
+                async with db.start_transaction() as dbt:
+                    reconciled_prefix = await cls._mutate_update_object_and_reconcile(
+                        info=info, data=data, prefix=prefix, db=dbt, namespace_id=namespace_id, branch=branch
+                    )
+                    result = await cls.mutate_update_to_graphql(db=dbt, info=info, obj=reconciled_prefix)

         return prefix, result

@@ -421,9 +440,9 @@ class InfrahubIPPrefixMutation(InfrahubMutationMixin, Mutation):
         namespace_rels = await prefix.ip_namespace.get_relationships(db=db)
         namespace_id = namespace_rels[0].peer_id

-
-
-        async with
+        lock_names = cls._get_lock_names(namespace_id)
+        async with InfrahubMultiLock(lock_registry=lock.registry, locks=lock_names):
+            async with graphql_context.db.start_transaction() as dbt:
                 reconciled_prefix = await cls._reconcile_prefix(
                     branch=branch, db=dbt, prefix=prefix, namespace_id=namespace_id, is_delete=True
                 )
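The recurring shape in these ipam hunks is: derive a list of lock names (now always a list, possibly empty), hold them all through InfrahubMultiLock, and only then open the transaction. A minimal standalone sketch of that pattern, using plain asyncio locks and a hypothetical lock name rather than Infrahub's registry:

import asyncio


class MultiLock:
    """Toy multi-lock: acquire named locks in order, release in reverse order."""

    def __init__(self, registry: dict[str, asyncio.Lock], locks: list[str] | None = None) -> None:
        self.registry = registry
        self.locks = locks or []

    async def __aenter__(self) -> None:
        for name in self.locks:
            await self.registry.setdefault(name, asyncio.Lock()).acquire()

    async def __aexit__(self, *_exc) -> None:
        for name in reversed(self.locks):
            self.registry[name].release()


async def main() -> None:
    registry: dict[str, asyncio.Lock] = {}
    # An empty list (non-default branch) makes the context manager a no-op,
    # which is why _get_lock_names now returns [] instead of a bare return.
    lock_names = ["object.ipaddress_ns-1"]  # hypothetical lock name
    async with MultiLock(registry, lock_names):
        ...  # open the DB transaction and reconcile inside the locked region


asyncio.run(main())
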
infrahub/graphql/mutations/main.py
CHANGED

@@ -28,7 +28,7 @@ from infrahub.lock import InfrahubMultiLock
 from infrahub.log import get_log_data, get_logger
 from infrahub.profiles.node_applier import NodeProfilesApplier

-from ...core.node.lock_utils import
+from ...core.node.lock_utils import get_lock_names_on_object_mutation
 from .node_getter.by_default_filter import MutationNodeGetterByDefaultFilter

 if TYPE_CHECKING:
@@ -180,41 +180,40 @@ class InfrahubMutationMixin:
         Wrapper around mutate_update to potentially activate locking and call it within a database transaction.
         """

-
-
-
+        # Prepare a clone to compute locks without triggering pool allocations
+        preview_obj = await NodeManager.get_one_by_id_or_default_filter(
+            db=db,
+            kind=obj.get_kind(),
+            id=obj.get_id(),
+            branch=branch,
         )
+        await preview_obj.from_graphql(db=db, data=data, process_pools=False)

-
-
-
-
-
-
-        else:
+        schema_branch = db.schema.get_schema_branch(name=branch.name)
+        lock_names = get_lock_names_on_object_mutation(node=preview_obj, schema_branch=schema_branch)
+
+        # FIXME: do not lock when data does not contain uniqueness constraint fields or resource pool allocations
+        async with InfrahubMultiLock(lock_registry=lock.registry, locks=lock_names, metrics=False):
+            if db.is_transaction:
                 obj = await cls.mutate_update_object(
                     db=db, info=info, data=data, branch=branch, obj=obj, skip_uniqueness_check=skip_uniqueness_check
                 )
-                result = await cls.mutate_update_to_graphql(db=db, info=info, obj=obj)
-                return obj, result

-
-
-
-
-            db=dbt,
-            info=info,
-            data=data,
-            branch=branch,
-            obj=obj,
-            skip_uniqueness_check=skip_uniqueness_check,
-        )
-        else:
+                result = await cls.mutate_update_to_graphql(db=db, info=info, obj=obj)
+                return obj, result
+
+            async with db.start_transaction() as dbt:
                 obj = await cls.mutate_update_object(
-                    db=dbt,
+                    db=dbt,
+                    info=info,
+                    data=data,
+                    branch=branch,
+                    obj=obj,
+                    skip_uniqueness_check=skip_uniqueness_check,
                 )
-
-
+
+                result = await cls.mutate_update_to_graphql(db=dbt, info=info, obj=obj)
+                return obj, result

     @classmethod
     @retry_db_transaction(name="object_update")
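The preview-object step above is the core idea of this hunk: apply the incoming payload to a freshly loaded copy of the node with pool processing disabled, so the lock names reflect the post-mutation state without side effects. A condensed sketch with stand-in callables (the real ones are NodeManager.get_one_by_id_or_default_filter and get_lock_names_on_object_mutation from the hunks above):

async def compute_update_lock_names(db, branch, obj, data, load_node, get_lock_names) -> list[str]:
    # load_node and get_lock_names are placeholders for the Infrahub helpers.
    preview = await load_node(db=db, kind=obj.get_kind(), id=obj.get_id(), branch=branch)
    # process_pools=False: evaluate the payload without allocating from resource pools
    await preview.from_graphql(db=db, data=data, process_pools=False)
    schema_branch = db.schema.get_schema_branch(name=branch.name)
    return get_lock_names(node=preview, schema_branch=schema_branch)
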
infrahub/graphql/schema_sort.py
ADDED

@@ -0,0 +1,170 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from graphql.language.ast import (
+    DocumentNode,
+    EnumTypeDefinitionNode,
+    EnumValueDefinitionNode,
+    FieldDefinitionNode,
+    InputObjectTypeDefinitionNode,
+    InputValueDefinitionNode,
+    InterfaceTypeDefinitionNode,
+    NamedTypeNode,
+    ObjectTypeDefinitionNode,
+)
+
+if TYPE_CHECKING:
+    from graphql import DefinitionNode
+
+
+def _sort_arguments(args: tuple[InputValueDefinitionNode, ...] | None) -> list[InputValueDefinitionNode] | None:
+    """Sort arguments (filters) of a field alphabetically by name.
+
+    Args:
+        args: List of input value definition nodes to sort, or None.
+
+    Returns:
+        Sorted list of input value definition nodes, or None if input was None.
+    """
+    if not args:
+        return None
+    return sorted(args, key=lambda a: a.name.value)
+
+
+def _sort_fields(fields: tuple[FieldDefinitionNode, ...] | None) -> list[FieldDefinitionNode] | None:
+    """Sort fields and their arguments alphabetically.
+
+    Args:
+        fields: List of field definition nodes to sort, or None.
+
+    Returns:
+        Sorted list of field definition nodes with sorted arguments, or None if input was None.
+    """
+    if not fields:
+        return None
+    sorted_fields = []
+    for field in sorted(fields, key=lambda fld: fld.name.value):
+        sorted_args = _sort_arguments(field.arguments)
+        sorted_fields.append(
+            FieldDefinitionNode(
+                name=field.name,
+                type=field.type,
+                arguments=sorted_args,
+                directives=field.directives,
+                description=field.description,
+                loc=field.loc,
+            )
+        )
+    return sorted_fields
+
+
+def _sort_enum_values(values: tuple[EnumValueDefinitionNode, ...] | None) -> list[EnumValueDefinitionNode] | None:
+    """Sort enum values alphabetically by name.
+
+    Args:
+        values: List of enum value definition nodes to sort, or None.
+
+    Returns:
+        Sorted list of enum value definition nodes, or None if input was None.
+    """
+    if not values:
+        return None
+    return sorted(values, key=lambda v: v.name.value)
+
+
+def _sort_input_fields(fields: tuple[InputValueDefinitionNode, ...] | None) -> list[InputValueDefinitionNode] | None:
+    """Sort input object fields alphabetically by name.
+
+    Args:
+        fields: List of input value definition nodes to sort, or None.
+
+    Returns:
+        Sorted list of input value definition nodes, or None if input was None.
+    """
+    if not fields:
+        return None
+    return sorted(fields, key=lambda f: f.name.value)
+
+
+def _sort_interfaces(interfaces: tuple[NamedTypeNode, ...] | None) -> list[NamedTypeNode] | None:
+    """Sort interface implementations alphabetically by name.
+
+    Args:
+        interfaces: Tuple of named type nodes representing interfaces, or None.
+
+    Returns:
+        Sorted list of named type nodes, or None if input was None.
+    """
+    if not interfaces:
+        return None
+    return sorted(interfaces, key=lambda i: i.name.value)
+
+
+def sort_schema_ast(document: DocumentNode) -> DocumentNode:
+    """Return a new DocumentNode with all definitions, fields, and arguments sorted alphabetically.
+
+    This function recursively sorts all GraphQL schema elements including:
+    - Type definitions (objects, interfaces, enums, input objects)
+    - Field definitions and their arguments
+    - Enum values
+    - Input object fields
+
+    Args:
+        document: The GraphQL document node containing schema definitions.
+
+    Returns:
+        A new DocumentNode with all elements sorted alphabetically by name.
+    """
+
+    sorted_definitions: list[
+        ObjectTypeDefinitionNode
+        | InterfaceTypeDefinitionNode
+        | EnumTypeDefinitionNode
+        | InputObjectTypeDefinitionNode
+        | DefinitionNode
+    ] = []
+
+    for definition in sorted(
+        document.definitions, key=lambda d: getattr(d.name, "value", "") if hasattr(d, "name") and d.name else ""
+    ):
+        if isinstance(definition, (ObjectTypeDefinitionNode, InterfaceTypeDefinitionNode)):
+            sorted_fields = _sort_fields(definition.fields)
+            sorted_interfaces = _sort_interfaces(definition.interfaces)
+            sorted_definitions.append(
+                definition.__class__(
+                    name=definition.name,
+                    interfaces=sorted_interfaces,
+                    directives=definition.directives,
+                    fields=sorted_fields,
+                    description=definition.description,
+                    loc=definition.loc,
+                )
+            )
+
+        elif isinstance(definition, EnumTypeDefinitionNode):
+            sorted_values = _sort_enum_values(definition.values)
+            sorted_definitions.append(
+                EnumTypeDefinitionNode(
+                    name=definition.name,
+                    directives=definition.directives,
+                    values=sorted_values,
+                    description=definition.description,
+                    loc=definition.loc,
+                )
+            )
+        elif isinstance(definition, InputObjectTypeDefinitionNode):
+            sorted_inputs = _sort_input_fields(definition.fields)
+            sorted_definitions.append(
+                InputObjectTypeDefinitionNode(
+                    name=definition.name,
+                    directives=definition.directives,
+                    fields=sorted_inputs,
+                    description=definition.description,
+                    loc=definition.loc,
+                )
+            )
+        else:
+            sorted_definitions.append(definition)
+
+    return DocumentNode(definitions=sorted_definitions)
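A small usage example for the new module, assuming graphql-core (already a dependency of this package) and the module path shown in the file list above:

from graphql import parse, print_ast

from infrahub.graphql.schema_sort import sort_schema_ast

SDL = """
type Zebra { name: String }
type Apple { weight(unit: String, precision: Int): Float }
"""

# Definitions, fields, and arguments all come back alphabetized:
# Apple precedes Zebra, and weight() prints as weight(precision: Int, unit: String).
print(print_ast(sort_schema_ast(parse(SDL))))
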
infrahub/graphql/types/branch.py
CHANGED

@@ -2,11 +2,12 @@ from __future__ import annotations

 from typing import TYPE_CHECKING, Any

-from graphene import Boolean, Field, String
+from graphene import Boolean, Field, Int, String

 from infrahub.core.branch import Branch
 from infrahub.core.constants import GLOBAL_BRANCH_NAME

+from .enums import InfrahubBranchStatus
 from .standard_node import InfrahubObjectType

 if TYPE_CHECKING:
@@ -19,6 +20,8 @@ class BranchType(InfrahubObjectType):
     description = String(required=False)
     origin_branch = String(required=False)
     branched_from = String(required=False)
+    status = InfrahubBranchStatus(required=True)
+    graph_version = Int(required=False)
     created_at = String(required=False)
     sync_with_git = Boolean(required=False)
     is_default = Boolean(required=False)
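With status and graph_version exposed on BranchType, a branch query can now return both. A hypothetical query through the SDK client (whose execute_graphql appears in the hfid hunk below); the endpoint, auth, and exact field casing are assumptions:

import asyncio

from infrahub_sdk import InfrahubClient

QUERY = "query { Branch { name status graph_version } }"


async def main() -> None:
    client = InfrahubClient(address="http://localhost:8000")  # assumed local instance
    result = await client.execute_graphql(query=QUERY)
    print(result["Branch"])


asyncio.run(main())
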
infrahub/graphql/types/enums.py
CHANGED

@@ -1,6 +1,7 @@
 from graphene import Enum

 from infrahub.core import constants
+from infrahub.core.branch.enums import BranchStatus
 from infrahub.permissions import constants as permission_constants

 CheckType = Enum.from_enum(constants.CheckType)
@@ -10,3 +11,5 @@ DiffAction = Enum.from_enum(constants.DiffAction)
 Severity = Enum.from_enum(constants.Severity)

 BranchRelativePermissionDecision = Enum.from_enum(permission_constants.BranchRelativePermissionDecision)
+
+InfrahubBranchStatus = Enum.from_enum(BranchStatus)
infrahub/hfid/tasks.py
CHANGED

@@ -2,6 +2,7 @@ from __future__ import annotations

 from typing import cast

+from infrahub_sdk.exceptions import URLNotFoundError
 from prefect import flow
 from prefect.logging import get_run_logger

@@ -56,12 +57,17 @@ async def hfid_update_value(
         log.debug(f"Ignoring to update {obj} with existing value on human_friendly_id={obj.hfid_value}")
         return

-
-
-
-
-
-
+    try:
+        await client.execute_graphql(
+            query=UPDATE_HFID,
+            variables={"id": obj.node_id, "kind": node_kind, "value": rendered_hfid},
+            branch_name=branch_name,
+        )
+        log.info(f"Updating {node_kind}.human_friendly_id='{rendered_hfid}' ({obj.node_id})")
+    except URLNotFoundError:
+        log.warning(
+            f"Updating {node_kind}.human_friendly_id='{rendered_hfid}' ({obj.node_id}) failed for branch {branch_name} (branch not found)"
+        )


 @flow(
@@ -151,7 +157,7 @@ async def hfid_setup(context: InfrahubContext, branch_name: str | None = None, e

 @flow(
     name="trigger-update-hfid",
-    flow_run_name="Trigger updates for
+    flow_run_name="Trigger updates for HFID for {kind}",
 )
 async def trigger_update_hfid(
     branch_name: str,
infrahub/lock.py
CHANGED

@@ -5,6 +5,7 @@ import time
 import uuid
 from asyncio import Lock as LocalLock
 from asyncio import sleep
+from contextvars import ContextVar
 from typing import TYPE_CHECKING

 import redis.asyncio as redis
@@ -50,9 +51,12 @@ GLOBAL_GRAPH_LOCK = "global.graph"
 class InfrahubMultiLock:
     """Context manager to allow multiple locks to be reserved together"""

-    def __init__(
+    def __init__(
+        self, lock_registry: InfrahubLockRegistry, locks: list[str] | None = None, metrics: bool = True
+    ) -> None:
         self.registry = lock_registry
         self.locks = locks or []
+        self.metrics = metrics

     async def __aenter__(self):
         await self.acquire()
@@ -67,11 +71,11 @@ class InfrahubMultiLock:

     async def acquire(self) -> None:
         for lock in self.locks:
-            await self.registry.get(name=lock).acquire()
+            await self.registry.get(name=lock, metrics=self.metrics).acquire()

     async def release(self) -> None:
         for lock in reversed(self.locks):
-            await self.registry.get(name=lock).release()
+            await self.registry.get(name=lock, metrics=self.metrics).release()


 class NATSLock:
@@ -123,6 +127,7 @@ class InfrahubLock:
         connection: redis.Redis | InfrahubServices | None = None,
         local: bool | None = None,
         in_multi: bool = False,
+        metrics: bool = True,
     ) -> None:
         self.use_local: bool | None = local
         self.local: LocalLock = None
@@ -133,6 +138,8 @@ class InfrahubLock:
         self.lock_type: str = "multi" if self.in_multi else "individual"
         self._acquire_time: int | None = None
         self.event = asyncio.Event()
+        self._recursion_var: ContextVar[int | None] = ContextVar(f"infrahub_lock_recursion_{self.name}", default=None)
+        self.metrics = metrics

         if not self.connection or (self.use_local is None and name.startswith("local.")):
             self.use_local = True
@@ -167,21 +174,47 @@ class InfrahubLock:
         await self.release()

     async def acquire(self) -> None:
-
-
-
-
-
+        depth = self._recursion_var.get()
+        if depth is not None:
+            self._recursion_var.set(depth + 1)
+            return
+
+        if self.metrics:
+            with LOCK_ACQUIRE_TIME_METRICS.labels(self.name, self.lock_type).time():
+                if not self.use_local:
+                    await self.remote.acquire(token=f"{current_timestamp()}::{WORKER_IDENTITY}")
+                else:
+                    await self.local.acquire()
+        elif not self.use_local:
+            await self.remote.acquire(token=f"{current_timestamp()}::{WORKER_IDENTITY}")
+        else:
+            await self.local.acquire()
+
         self.acquire_time = time.time_ns()
         self.event.clear()
+        self._recursion_var.set(1)

     async def release(self) -> None:
-
-
+        depth = self._recursion_var.get()
+        if depth is None:
+            raise RuntimeError("Lock release attempted without ownership context.")
+
+        if depth > 1:
+            self._recursion_var.set(depth - 1)
+            return
+
+        if self.acquire_time is not None:
+            duration_ns = time.time_ns() - self.acquire_time
+            if self.metrics:
+                LOCK_RESERVE_TIME_METRICS.labels(self.name, self.lock_type).observe(duration_ns / 1000000000)
+            self.acquire_time = None
+
         if not self.use_local:
             await self.remote.release()
         else:
             self.local.release()
+
+        self._recursion_var.set(None)
         self.event.set()

     async def locked(self) -> bool:
@@ -272,11 +305,18 @@ class InfrahubLockRegistry:
         return self.locks[lock_name]

     def get(
-        self,
+        self,
+        name: str,
+        namespace: str | None = None,
+        local: bool | None = None,
+        in_multi: bool = False,
+        metrics: bool = True,
     ) -> InfrahubLock:
         lock_name = self.name_generator.generate_name(name=name, namespace=namespace, local=local)
         if lock_name not in self.locks:
-            self.locks[lock_name] = InfrahubLock(
+            self.locks[lock_name] = InfrahubLock(
+                name=lock_name, connection=self.connection, in_multi=in_multi, metrics=metrics
+            )
         return self.locks[lock_name]

     def local_schema_lock(self) -> LocalLock:
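The ContextVar addition makes InfrahubLock re-entrant per execution context: nested acquire() calls only bump a depth counter, and only the outermost release() frees the underlying lock. A standalone sketch of that scheme (not the Infrahub class itself):

import asyncio
from contextvars import ContextVar


class ReentrantAsyncLock:
    def __init__(self) -> None:
        self._inner = asyncio.Lock()
        self._depth: ContextVar[int | None] = ContextVar("depth", default=None)

    async def acquire(self) -> None:
        depth = self._depth.get()
        if depth is not None:  # already held in this context: just count
            self._depth.set(depth + 1)
            return
        await self._inner.acquire()
        self._depth.set(1)

    def release(self) -> None:
        depth = self._depth.get()
        if depth is None:
            raise RuntimeError("release() without a matching acquire()")
        if depth > 1:
            self._depth.set(depth - 1)
            return
        self._inner.release()
        self._depth.set(None)


async def main() -> None:
    lock = ReentrantAsyncLock()
    await lock.acquire()
    await lock.acquire()  # nested: no deadlock, depth becomes 2
    lock.release()        # depth back to 1, inner lock still held
    lock.release()        # outermost release frees the asyncio.Lock


asyncio.run(main())
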
infrahub/message_bus/types.py
CHANGED

@@ -89,7 +89,8 @@ class ProposedChangeArtifactDefinition(BaseModel):
     definition_id: str
     definition_name: str
     artifact_name: str
-    query_name: str
+    query_name: str  # Deprecated
+    query_id: str
     query_models: list[str]
     repository_id: str
     transform_kind: str
infrahub/permissions/constants.py
CHANGED

@@ -30,6 +30,7 @@ GLOBAL_PERMISSION_DENIAL_MESSAGE = {
     GlobalPermissions.MANAGE_ACCOUNTS.value: "You are not allowed to manage user accounts, groups or roles",
     GlobalPermissions.MANAGE_PERMISSIONS.value: "You are not allowed to manage permissions",
     GlobalPermissions.MANAGE_REPOSITORIES.value: "You are not allowed to manage repositories",
+    GlobalPermissions.UPDATE_OBJECT_HFID_DISPLAY_LABEL.value: "You are not allowed to update human friendly IDs and display labels ad hoc",
 }

 GLOBAL_PERMISSION_DESCRIPTION = {
@@ -42,4 +43,5 @@ GLOBAL_PERMISSION_DESCRIPTION = {
     GlobalPermissions.MANAGE_PERMISSIONS: "Allow a user to manage permissions",
     GlobalPermissions.MANAGE_REPOSITORIES: "Allow a user to manage repositories",
     GlobalPermissions.SUPER_ADMIN: "Allow a user to do anything",
+    GlobalPermissions.UPDATE_OBJECT_HFID_DISPLAY_LABEL: "Allow a user to update objects' display labels and human friendly IDs ad hoc",
 }