infrahub-server 1.6.0__py3-none-any.whl → 1.6.0b0__py3-none-any.whl
This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
- infrahub/config.py +1 -1
- infrahub/core/constants/__init__.py +0 -1
- infrahub/core/manager.py +31 -36
- infrahub/core/models.py +6 -5
- infrahub/core/node/__init__.py +5 -11
- infrahub/core/node/create.py +8 -36
- infrahub/core/node/standard.py +1 -1
- infrahub/core/protocols.py +7 -1
- infrahub/core/query/attribute.py +1 -1
- infrahub/core/query/node.py +5 -9
- infrahub/core/relationship/model.py +1 -12
- infrahub/core/schema/generic_schema.py +1 -1
- infrahub/core/schema/schema_branch.py +16 -35
- infrahub/core/validators/determiner.py +6 -3
- infrahub/database/__init__.py +1 -1
- infrahub/git/base.py +3 -2
- infrahub/graphql/loaders/peers.py +0 -6
- infrahub/graphql/mutations/profile.py +1 -8
- infrahub/graphql/queries/resource_manager.py +3 -2
- infrahub/graphql/queries/search.py +3 -2
- infrahub/graphql/resolvers/ipam.py +0 -20
- infrahub/graphql/resolvers/many_relationship.py +11 -12
- infrahub/graphql/resolvers/resolver.py +2 -6
- infrahub/graphql/resolvers/single_relationship.py +11 -1
- infrahub/log.py +1 -1
- infrahub/message_bus/messages/__init__.py +12 -0
- infrahub/profiles/node_applier.py +0 -9
- infrahub/proposed_change/tasks.py +1 -1
- infrahub/task_manager/task.py +2 -2
- infrahub/trigger/setup.py +9 -6
- infrahub/utils.py +0 -18
- infrahub/workers/infrahub_async.py +6 -7
- infrahub_sdk/client.py +1 -113
- infrahub_sdk/ctl/branch.py +1 -175
- infrahub_sdk/ctl/check.py +3 -3
- infrahub_sdk/ctl/cli_commands.py +9 -9
- infrahub_sdk/ctl/generator.py +2 -2
- infrahub_sdk/ctl/graphql.py +2 -1
- infrahub_sdk/ctl/importer.py +2 -1
- infrahub_sdk/ctl/repository.py +6 -5
- infrahub_sdk/ctl/task.py +4 -2
- infrahub_sdk/ctl/utils.py +2 -2
- infrahub_sdk/ctl/validate.py +2 -1
- infrahub_sdk/diff.py +3 -80
- infrahub_sdk/graphql/constants.py +1 -14
- infrahub_sdk/graphql/renderers.py +1 -5
- infrahub_sdk/node/attribute.py +1 -0
- infrahub_sdk/node/constants.py +1 -1
- infrahub_sdk/node/related_node.py +2 -1
- infrahub_sdk/node/relationship.py +2 -1
- infrahub_sdk/protocols_base.py +1 -0
- infrahub_sdk/schema/__init__.py +3 -0
- {infrahub_server-1.6.0.dist-info → infrahub_server-1.6.0b0.dist-info}/METADATA +1 -1
- {infrahub_server-1.6.0.dist-info → infrahub_server-1.6.0b0.dist-info}/RECORD +58 -58
- {infrahub_server-1.6.0.dist-info → infrahub_server-1.6.0b0.dist-info}/WHEEL +1 -1
- infrahub_testcontainers/container.py +2 -2
- {infrahub_server-1.6.0.dist-info → infrahub_server-1.6.0b0.dist-info}/entry_points.txt +0 -0
- {infrahub_server-1.6.0.dist-info → infrahub_server-1.6.0b0.dist-info}/licenses/LICENSE.txt +0 -0

infrahub/graphql/queries/search.py CHANGED

@@ -1,6 +1,5 @@
 from __future__ import annotations
 
-import contextlib
 import ipaddress
 from typing import TYPE_CHECKING, Any
 
@@ -118,9 +117,11 @@ async def search_resolver(
     if matching:
         results.append(matching)
     else:
-        with contextlib.suppress(ValueError, ipaddress.AddressValueError):
+        try:
             # Convert any IPv6 address, network or partial address to collapsed format as it might be stored in db.
             q = _collapse_ipv6(q)
+        except (ValueError, ipaddress.AddressValueError):
+            pass
 
     for kind in [InfrahubKind.NODE, InfrahubKind.GENERICGROUP]:
         objs = await NodeManager.query(
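
Aside: the two spellings in the search.py hunk above are behaviorally identical; contextlib.suppress is the compact form of a try/except that swallows the named exceptions. A minimal standalone sketch, where ipaddress.ip_address stands in for the file's _collapse_ipv6 helper:

import contextlib
import ipaddress

q = "2001:0db8:0000:0000:0000:0000:0000:0001"

# Form on the 1.6.0 side: exceptions raised inside the block are discarded.
with contextlib.suppress(ValueError, ipaddress.AddressValueError):
    q = str(ipaddress.ip_address(q))  # collapses to "2001:db8::1"

# Equivalent form on the 1.6.0b0 side:
try:
    q = str(ipaddress.ip_address(q))
except (ValueError, ipaddress.AddressValueError):
    pass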

infrahub/graphql/resolvers/ipam.py CHANGED

@@ -4,7 +4,6 @@ import ipaddress
 from typing import TYPE_CHECKING, Any
 
 from graphql.type.definition import GraphQLNonNull
-from infrahub_sdk.utils import deep_merge_dict
 from netaddr import IPSet
 from opentelemetry import trace
 
@@ -234,23 +233,6 @@ async def _resolve_available_prefix_nodes(
     return available_nodes
 
 
-def _ensure_display_label_fields(
-    db: InfrahubDatabase, branch: Branch, schema: NodeSchema | GenericSchema, node_fields: dict[str, Any]
-) -> None:
-    """Ensure fields needed to compute display_label are included in node_fields.
-
-    This is mostly for virtual nodes (InternalIPPrefixAvailable, InternalIPRangeAvailable) that are not stored in the
-    database.
-    """
-    if "display_label" not in node_fields or schema.kind not in [InfrahubKind.IPPREFIX, InfrahubKind.IPADDRESS]:
-        return
-
-    schema_branch = db.schema.get_schema_branch(name=branch.name)
-    display_label_fields = schema_branch.generate_fields_for_display_label(name=schema.kind)
-    if display_label_fields:
-        deep_merge_dict(dicta=node_fields, dictb=display_label_fields)
-
-
 def _filter_kinds(nodes: list[Node], kinds: list[str], limit: int | None) -> list[Node]:
     filtered: list[Node] = []
     available_node_kinds = [InfrahubKind.IPPREFIXAVAILABLE, InfrahubKind.IPRANGEAVAILABLE]
@@ -342,8 +324,6 @@ async def ipam_paginated_list_resolver(  # noqa: PLR0915
     edges = fields.get("edges", {})
     node_fields = edges.get("node", {})
 
-    _ensure_display_label_fields(db=db, branch=graphql_context.branch, schema=schema, node_fields=node_fields)
-
     permission_set: dict[str, Any] | None = None
     permissions = (
         await get_permissions(schema=schema, graphql_context=graphql_context)
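
The removed _ensure_display_label_fields relied on infrahub_sdk.utils.deep_merge_dict to fold the attributes backing display_label into the caller's field selection. A rough sketch of that merge behavior, assuming simple recursive semantics (illustrative, not the SDK's actual implementation):

from typing import Any

def deep_merge(dicta: dict[str, Any], dictb: dict[str, Any]) -> dict[str, Any]:
    # Merge dictb into a copy of dicta; nested dicts are merged key by key.
    merged = dict(dicta)
    for key, value in dictb.items():
        if isinstance(merged.get(key), dict) and isinstance(value, dict):
            merged[key] = deep_merge(merged[key], value)
        else:
            merged[key] = value
    return merged

node_fields = {"display_label": None, "id": None}
display_label_fields = {"prefix": {"value": None}}  # hypothetical attributes backing display_label
print(deep_merge(node_fields, display_label_fields))
# {'display_label': None, 'id': None, 'prefix': {'value': None}}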

infrahub/graphql/resolvers/many_relationship.py CHANGED

@@ -1,6 +1,7 @@
 from typing import TYPE_CHECKING, Any
 
 from graphql import GraphQLResolveInfo
+from infrahub_sdk.utils import deep_merge_dict
 
 from infrahub.core.branch.models import Branch
 from infrahub.core.constants import BranchSupportType, RelationshipHierarchyDirection
@@ -11,7 +12,6 @@ from infrahub.core.schema.relationship_schema import RelationshipSchema
 from infrahub.core.timestamp import Timestamp
 from infrahub.database import InfrahubDatabase
 from infrahub.graphql.field_extractor import extract_graphql_fields
-from infrahub.utils import has_any_key
 
 from ..loaders.peers import PeerRelationshipsDataLoader, QueryPeerParams
 from ..types import RELATIONS_PROPERTY_MAP, RELATIONS_PROPERTY_MAP_REVERSED
@@ -195,9 +195,6 @@ class ManyRelationshipResolver:
         offset: int | None = None,
         limit: int | None = None,
     ) -> list[dict[str, Any]] | None:
-        include_source = has_any_key(data=node_fields, keys=["_relation__source", "source"])
-        include_owner = has_any_key(data=node_fields, keys=["_relation__owner", "owner"])
-
         async with db.start_session(read_only=True) as dbs:
             objs = await NodeManager.query_peers(
                 db=dbs,
@@ -212,8 +209,6 @@ class ManyRelationshipResolver:
                 branch=branch,
                 branch_agnostic=rel_schema.branch is BranchSupportType.AGNOSTIC,
                 fetch_peers=True,
-                include_source=include_source,
-                include_owner=include_owner,
             )
         if not objs:
             return None
@@ -231,11 +226,17 @@ class ManyRelationshipResolver:
         filters: dict[str, Any],
         node_fields: dict[str, Any],
     ) -> list[dict[str, Any]] | None:
-        if node_fields and "
-
+        if node_fields and "display_label" in node_fields:
+            schema_branch = db.schema.get_schema_branch(name=branch.name)
+            display_label_fields = schema_branch.generate_fields_for_display_label(name=rel_schema.peer)
+            if display_label_fields:
+                node_fields = deep_merge_dict(dicta=node_fields, dictb=display_label_fields)
 
-
-
+        if node_fields and "hfid" in node_fields:
+            peer_schema = db.schema.get(name=rel_schema.peer, branch=branch, duplicate=False)
+            hfid_fields = peer_schema.generate_fields_for_hfid()
+            if hfid_fields:
+                node_fields = deep_merge_dict(dicta=node_fields, dictb=hfid_fields)
 
         query_params = QueryPeerParams(
             branch=branch,
@@ -245,8 +246,6 @@ class ManyRelationshipResolver:
             fields=node_fields,
             at=at,
             branch_agnostic=rel_schema.branch is BranchSupportType.AGNOSTIC,
-            include_source=include_source,
-            include_owner=include_owner,
         )
         if query_params in self._data_loader_instances:
             loader = self._data_loader_instances[query_params]

infrahub/graphql/resolvers/resolver.py CHANGED

@@ -9,7 +9,6 @@ from infrahub.core.constants import BranchSupportType, InfrahubKind, RelationshipHierarchyDirection
 from infrahub.core.manager import NodeManager
 from infrahub.exceptions import NodeNotFoundError
 from infrahub.graphql.field_extractor import extract_graphql_fields
-from infrahub.utils import has_any_key
 
 from ..models import OrderModel
 from ..parser import extract_selection
@@ -186,9 +185,6 @@ async def default_paginated_list_resolver(
 
     objs = []
     if edges or "hfid" in filters:
-        include_source = has_any_key(data=node_fields, keys=["_relation__source", "source"])
-        include_owner = has_any_key(data=node_fields, keys=["_relation__owner", "owner"])
-
        objs = await NodeManager.query(
            db=db,
            schema=schema,
@@ -199,8 +195,8 @@ async def default_paginated_list_resolver(
            limit=limit,
            offset=offset,
            account=graphql_context.account_session,
-           include_source=include_source,
-           include_owner=include_owner,
+           include_source=True,
+           include_owner=True,
            partial_match=partial_match,
            order=order,
        )

infrahub/graphql/resolvers/single_relationship.py CHANGED

@@ -2,6 +2,7 @@ from typing import TYPE_CHECKING, Any
 
 from graphql import GraphQLResolveInfo
 from graphql.type.definition import GraphQLNonNull
+from infrahub_sdk.utils import deep_merge_dict
 
 from infrahub.core.branch.models import Branch
 from infrahub.core.constants import BranchSupportType
@@ -141,8 +142,17 @@ class SingleRelationshipResolver:
         except (KeyError, IndexError):
             return None
 
+        if node_fields and "display_label" in node_fields:
+            schema_branch = db.schema.get_schema_branch(name=branch.name)
+            display_label_fields = schema_branch.generate_fields_for_display_label(name=rel_schema.peer)
+            if display_label_fields:
+                node_fields = deep_merge_dict(dicta=node_fields, dictb=display_label_fields)
+
         if node_fields and "hfid" in node_fields:
-
+            peer_schema = db.schema.get(name=rel_schema.peer, branch=branch, duplicate=False)
+            hfid_fields = peer_schema.generate_fields_for_hfid()
+            if hfid_fields:
+                node_fields = deep_merge_dict(dicta=node_fields, dictb=hfid_fields)
 
         query_params = GetManyParams(
             fields=node_fields,
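
In both relationship resolvers, the beta side expands the synthetic display_label and hfid selections into the concrete peer attributes they are derived from before the peers are queried. A toy illustration of the data flow, with hypothetical generated fields (the real values come from the schema branch):

from infrahub_sdk.utils import deep_merge_dict

node_fields = {"id": None, "display_label": None}
# What generate_fields_for_display_label might return for a peer whose
# display label is rendered from name__value (hypothetical):
display_label_fields = {"name": {"value": None}}
node_fields = deep_merge_dict(dicta=node_fields, dictb=display_label_fields)
# node_fields now also selects the backing attribute:
# {"id": None, "display_label": None, "name": {"value": None}}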

infrahub/log.py CHANGED

@@ -10,7 +10,7 @@ from structlog.dev import plain_traceback
 
 if TYPE_CHECKING:
     from structlog.types import Processor
 
-INFRAHUB_PRODUCTION = TypeAdapter(bool).validate_python(os.environ.get("INFRAHUB_PRODUCTION",
+INFRAHUB_PRODUCTION = TypeAdapter(bool).validate_python(os.environ.get("INFRAHUB_PRODUCTION", True))
 INFRAHUB_LOG_LEVEL = os.environ.get("INFRAHUB_LOG_LEVEL", "INFO")
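
TypeAdapter(bool) applies pydantic's boolean coercion to whatever the environment provides, so strings such as "false", "0", or "yes" parse as expected. For example:

import os
from pydantic import TypeAdapter

os.environ["INFRAHUB_PRODUCTION"] = "false"
print(TypeAdapter(bool).validate_python(os.environ.get("INFRAHUB_PRODUCTION", True)))  # False

os.environ.pop("INFRAHUB_PRODUCTION")
print(TypeAdapter(bool).validate_python(os.environ.get("INFRAHUB_PRODUCTION", True)))  # True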

infrahub/message_bus/messages/__init__.py CHANGED

@@ -22,8 +22,20 @@ RESPONSE_MAP: dict[str, type[InfrahubResponse]] = {
 }
 
 PRIORITY_MAP = {
+    "check.artifact.create": 2,
+    "check.repository.check_definition": 2,
+    "check.repository.merge_conflicts": 2,
     "send.echo.request": 5,  # Currently only for testing purposes, will be removed once all message bus have been migrated to prefect
+    "event.branch.delete": 5,
+    "event.branch.merge": 5,
+    "event.schema.update": 5,
+    "git.diff.names_only": 4,
     "git.file.get": 4,
+    "request.artifact.generate": 2,
+    "request.git.sync": 4,
+    "request.proposed_change.pipeline": 5,
+    "transform.jinja.template": 4,
+    "transform.python.data": 4,
 }
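
PRIORITY_MAP maps message-bus routing keys to numeric priorities. A consumer would presumably look priorities up with a fallback for unlisted messages, along these lines (the default shown is hypothetical; the real one lives elsewhere in the message bus code):

DEFAULT_PRIORITY = 3  # assumed fallback
priority = PRIORITY_MAP.get("check.artifact.create", DEFAULT_PRIORITY)  # -> 2
priority = PRIORITY_MAP.get("unlisted.message", DEFAULT_PRIORITY)       # -> 3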

infrahub/profiles/node_applier.py CHANGED

@@ -9,15 +9,6 @@ from .queries.get_profile_data import GetProfileDataQuery, ProfileData
 
 
 class NodeProfilesApplier:
-    """Applies profile values to nodes and templates.
-
-    Profile values take precedence over both default values and template-sourced values.
-    When a template has profiles assigned:
-    1. Profile values are applied to the template itself
-    2. Nodes created from that template inherit the profile values (not the template's own values)
-    3. Profile priority determines which profile wins when multiple profiles set the same attribute
-    """
-
     def __init__(self, db: InfrahubDatabase, branch: Branch):
         self.db = db
         self.branch = branch

infrahub/proposed_change/tasks.py CHANGED

@@ -480,7 +480,7 @@ async def _get_proposed_change_schema_integrity_constraints(
             DiffElementType.RELATIONSHIP_ONE.value.lower(),
         ):
             field_summary.relationship_names.add(element_name)
-        elif element_type.lower()
+        elif element_type.lower() in (DiffElementType.ATTRIBUTE.value.lower(),):
             field_summary.attribute_names.add(element_name)
 
     determiner = ConstraintValidatorDeterminer(schema_branch=schema)

infrahub/task_manager/task.py CHANGED

@@ -151,7 +151,7 @@ class PrefectTask:
             remaining -= nb_fetched
 
         for flow_log in all_logs:
-            if flow_log.flow_run_id and flow_log.message
+            if flow_log.flow_run_id and flow_log.message not in ["Finished in state Completed()"]:
                 logs_flow.logs[flow_log.flow_run_id].append(flow_log)
 
         return logs_flow
@@ -325,7 +325,7 @@ class PrefectTask:
             "parameters": flow.parameters,
             "branch": await cls._extract_branch_name(flow=flow),
             "tags": flow.tags,
-            "workflow": workflow_names.get(flow.flow_id),
+            "workflow": workflow_names.get(flow.flow_id, None),
             "related_node": related_node.id if related_node else None,
             "related_node_kind": related_node.kind if related_node else None,
             "related_nodes": related_nodes_info.get_related_nodes_as_dict(flow_id=flow.id),

infrahub/trigger/setup.py CHANGED

@@ -122,7 +122,7 @@ async def setup_triggers(
         actions=[action.get_prefect(mapping=deployments_mapping) for action in trigger.actions],
     )
 
-    existing_automation = existing_automations.get(trigger.generate_name())
+    existing_automation = existing_automations.get(trigger.generate_name(), None)
 
     if existing_automation:
         trigger_comparison = compare_automations(
@@ -171,16 +171,19 @@ async def gather_all_automations(client: PrefectClient) -> list[Automation]:
     retrieves them all by paginating through the results. The default within Prefect is 200 items,
     and client.read_automations() doesn't support pagination parameters.
     """
+    automation_count_response = await client.request("POST", "/automations/count")
+    automation_count_response.raise_for_status()
+    automation_count: int = automation_count_response.json()
     offset = 0
     limit = 200
+    missing_automations = True
     automations: list[Automation] = []
-    while
+    while missing_automations:
         response = await client.request("POST", "/automations/filter", json={"limit": limit, "offset": offset})
         response.raise_for_status()
-
-        automations
-
-        break
+        automations.extend(Automation.model_validate_list(response.json()))
+        if len(automations) >= automation_count:
+            missing_automations = False
         offset += limit
 
     return automations
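
The reworked gather_all_automations first asks Prefect for the total number of automations, then pages through /automations/filter until that many have been collected. The same count-then-paginate pattern in generic form, against an httpx-style async client (endpoint names and response shapes are illustrative, not Prefect's API):

from typing import Any

async def fetch_all(client: Any, count_url: str, filter_url: str, limit: int = 200) -> list[dict]:
    # Fetch the total first so the loop has a definite stop condition.
    count_response = await client.request("POST", count_url)
    count_response.raise_for_status()
    total: int = count_response.json()

    items: list[dict] = []
    offset = 0
    while len(items) < total:
        response = await client.request("POST", filter_url, json={"limit": limit, "offset": offset})
        response.raise_for_status()
        page = response.json()
        if not page:  # defensive stop if the server returns fewer items than reported
            break
        items.extend(page)
        offset += limit
    return items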

infrahub/utils.py CHANGED

@@ -83,21 +83,3 @@ def get_all_subclasses[AnyClass: type](cls: AnyClass) -> list[AnyClass]:
         subclasses.append(subclass)
         subclasses.extend(get_all_subclasses(subclass))
     return subclasses
-
-
-def has_any_key(data: dict[str, Any], keys: list[str]) -> bool:
-    """Recursively check if any of the specified keys exist in the dictionary at any level.
-
-    Args:
-        data: The dictionary to search through
-        keys: List of key names to search for
-
-    Returns:
-        True if any of the keys are found at any level of the dictionary, False otherwise
-    """
-    for key, value in data.items():
-        if key in keys:
-            return True
-        if isinstance(value, dict) and has_any_key(data=value, keys=keys):
-            return True
-    return False
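
For reference, the has_any_key helper removed above was used by the GraphQL resolvers to detect whether a selection referenced source/owner metadata at any depth, e.g.:

fields = {"edges": {"node": {"name": {"_relation__source": None}}}}
has_any_key(data=fields, keys=["_relation__source", "source"])  # True
has_any_key(data=fields, keys=["owner"])  # False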

infrahub/workers/infrahub_async.py CHANGED

@@ -1,5 +1,4 @@
 import asyncio
-import contextlib
 import logging
 import os
 from pathlib import Path
@@ -108,7 +107,7 @@ class InfrahubWorkerAsync(BaseWorker):
 
         # Start metric endpoint
         if metric_port is None or metric_port != 0:
-            metric_port = metric_port or int(os.environ.get("INFRAHUB_METRICS_PORT",
+            metric_port = metric_port or int(os.environ.get("INFRAHUB_METRICS_PORT", 8000))
             self._logger.info(f"Starting metric endpoint on port {metric_port}")
             start_http_server(metric_port)
 
@@ -213,18 +212,18 @@ class InfrahubWorkerAsync(BaseWorker):
         global_config_file = config.SETTINGS.git.global_config_file
         if not os.getenv("GIT_CONFIG_GLOBAL") and global_config_file:
             config_dir = Path(global_config_file).parent
-            with contextlib.suppress(FileExistsError):
+            try:
                 config_dir.mkdir(exist_ok=True, parents=True)
+            except FileExistsError:
+                pass
             os.environ["GIT_CONFIG_GLOBAL"] = global_config_file
             self._logger.info(f"Set git config file to {global_config_file}")
 
         await self._run_git_config_global(config.SETTINGS.git.user_name, setting_name="user.name")
         await self._run_git_config_global(config.SETTINGS.git.user_email, setting_name="user.email")
-        await self._run_git_config_global("*", "--replace-all", setting_name="safe.directory")
+        await self._run_git_config_global("'*'", "--replace-all", setting_name="safe.directory")
         await self._run_git_config_global("true", setting_name="credential.usehttppath")
-        await self._run_git_config_global(
-            f"/usr/bin/env {config.SETTINGS.dev.git_credential_helper}", setting_name="credential.helper"
-        )
+        await self._run_git_config_global(config.SETTINGS.dev.git_credential_helper, setting_name="credential.helper")
 
     async def _run_git_config_global(self, *args: str, setting_name: str) -> None:
         proc = await asyncio.create_subprocess_exec(
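
Note that _run_git_config_global runs git through asyncio.create_subprocess_exec, so no shell is involved and "*" was never subject to glob expansion; switching the value to "'*'" stores literal quote characters in the git config. A sketch of how such a helper plausibly assembles the command (only the create_subprocess_exec call is visible in the diff; the argument layout is assumed):

import asyncio

async def run_git_config_global(*args: str, setting_name: str) -> None:
    # Assumed layout: args[0] is the value, remaining args are extra flags.
    # For example ("*", "--replace-all") with setting_name="safe.directory"
    # would run: git config --global --replace-all safe.directory *
    # Each argument is passed verbatim to git, without shell interpretation.
    proc = await asyncio.create_subprocess_exec(
        "git", "config", "--global", *args[1:], setting_name, args[0],
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    await proc.communicate()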

infrahub_sdk/client.py CHANGED

@@ -34,7 +34,7 @@ from .config import Config
 from .constants import InfrahubClientMode
 from .convert_object_type import CONVERT_OBJECT_MUTATION, ConversionFieldInput
 from .data import RepositoryBranchInfo, RepositoryData
-from .diff import DiffTreeData, NodeDiff, diff_tree_node_to_node_diff, get_diff_summary_query, get_diff_tree_query
+from .diff import NodeDiff, diff_tree_node_to_node_diff, get_diff_summary_query
 from .exceptions import (
     AuthenticationError,
     Error,
@@ -1282,62 +1282,6 @@ class InfrahubClient(BaseClient):
 
         return node_diffs
 
-    async def get_diff_tree(
-        self,
-        branch: str,
-        name: str | None = None,
-        from_time: datetime | None = None,
-        to_time: datetime | None = None,
-        timeout: int | None = None,
-        tracker: str | None = None,
-    ) -> DiffTreeData | None:
-        """Get complete diff tree with metadata and nodes.
-
-        Returns None if no diff exists.
-        """
-        query = get_diff_tree_query()
-        input_data = {"branch_name": branch}
-        if name:
-            input_data["name"] = name
-        if from_time and to_time and from_time > to_time:
-            raise ValueError("from_time must be <= to_time")
-        if from_time:
-            input_data["from_time"] = from_time.isoformat()
-        if to_time:
-            input_data["to_time"] = to_time.isoformat()
-
-        response = await self.execute_graphql(
-            query=query.render(),
-            branch_name=branch,
-            timeout=timeout,
-            tracker=tracker,
-            variables=input_data,
-        )
-
-        diff_tree = response["DiffTree"]
-        if diff_tree is None:
-            return None
-
-        # Convert nodes to NodeDiff objects
-        node_diffs: list[NodeDiff] = []
-        if "nodes" in diff_tree:
-            for node_dict in diff_tree["nodes"]:
-                node_diff = diff_tree_node_to_node_diff(node_dict=node_dict, branch_name=branch)
-                node_diffs.append(node_diff)
-
-        return DiffTreeData(
-            num_added=diff_tree.get("num_added") or 0,
-            num_updated=diff_tree.get("num_updated") or 0,
-            num_removed=diff_tree.get("num_removed") or 0,
-            num_conflicts=diff_tree.get("num_conflicts") or 0,
-            to_time=diff_tree["to_time"],
-            from_time=diff_tree["from_time"],
-            base_branch=diff_tree["base_branch"],
-            diff_branch=diff_tree["diff_branch"],
-            name=diff_tree.get("name"),
-            nodes=node_diffs,
-        )
-
     @overload
     async def allocate_next_ip_address(
         self,
@@ -2576,62 +2520,6 @@ class InfrahubClientSync(BaseClient):
 
         return node_diffs
 
-    def get_diff_tree(
-        self,
-        branch: str,
-        name: str | None = None,
-        from_time: datetime | None = None,
-        to_time: datetime | None = None,
-        timeout: int | None = None,
-        tracker: str | None = None,
-    ) -> DiffTreeData | None:
-        """Get complete diff tree with metadata and nodes.
-
-        Returns None if no diff exists.
-        """
-        query = get_diff_tree_query()
-        input_data = {"branch_name": branch}
-        if name:
-            input_data["name"] = name
-        if from_time and to_time and from_time > to_time:
-            raise ValueError("from_time must be <= to_time")
-        if from_time:
-            input_data["from_time"] = from_time.isoformat()
-        if to_time:
-            input_data["to_time"] = to_time.isoformat()
-
-        response = self.execute_graphql(
-            query=query.render(),
-            branch_name=branch,
-            timeout=timeout,
-            tracker=tracker,
-            variables=input_data,
-        )
-
-        diff_tree = response["DiffTree"]
-        if diff_tree is None:
-            return None
-
-        # Convert nodes to NodeDiff objects
-        node_diffs: list[NodeDiff] = []
-        if "nodes" in diff_tree:
-            for node_dict in diff_tree["nodes"]:
-                node_diff = diff_tree_node_to_node_diff(node_dict=node_dict, branch_name=branch)
-                node_diffs.append(node_diff)
-
-        return DiffTreeData(
-            num_added=diff_tree.get("num_added") or 0,
-            num_updated=diff_tree.get("num_updated") or 0,
-            num_removed=diff_tree.get("num_removed") or 0,
-            num_conflicts=diff_tree.get("num_conflicts") or 0,
-            to_time=diff_tree["to_time"],
-            from_time=diff_tree["from_time"],
-            base_branch=diff_tree["base_branch"],
-            diff_branch=diff_tree["diff_branch"],
-            name=diff_tree.get("name"),
-            nodes=node_diffs,
-        )
-
     @overload
     def allocate_next_ip_address(
         self,