infrahub-server 1.3.7__py3-none-any.whl → 1.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- infrahub/api/internal.py +5 -0
- infrahub/artifacts/tasks.py +17 -22
- infrahub/branch/merge_mutation_checker.py +38 -0
- infrahub/cli/__init__.py +2 -2
- infrahub/cli/context.py +7 -3
- infrahub/cli/db.py +5 -16
- infrahub/cli/upgrade.py +10 -29
- infrahub/computed_attribute/tasks.py +36 -46
- infrahub/config.py +57 -6
- infrahub/constants/environment.py +1 -0
- infrahub/core/attribute.py +15 -7
- infrahub/core/branch/tasks.py +43 -41
- infrahub/core/constants/__init__.py +21 -6
- infrahub/core/constants/infrahubkind.py +2 -0
- infrahub/core/diff/coordinator.py +3 -1
- infrahub/core/diff/model/path.py +0 -39
- infrahub/core/diff/repository/repository.py +0 -8
- infrahub/core/diff/tasks.py +11 -8
- infrahub/core/graph/__init__.py +1 -1
- infrahub/core/graph/index.py +1 -2
- infrahub/core/graph/schema.py +50 -29
- infrahub/core/initialization.py +81 -47
- infrahub/core/ipam/tasks.py +4 -3
- infrahub/core/merge.py +8 -10
- infrahub/core/migrations/__init__.py +2 -0
- infrahub/core/migrations/graph/__init__.py +4 -0
- infrahub/core/migrations/graph/m036_drop_attr_value_index.py +45 -0
- infrahub/core/migrations/graph/m037_index_attr_vals.py +577 -0
- infrahub/core/migrations/query/attribute_add.py +27 -2
- infrahub/core/migrations/schema/attribute_kind_update.py +156 -0
- infrahub/core/migrations/schema/tasks.py +6 -5
- infrahub/core/models.py +5 -1
- infrahub/core/node/proposed_change.py +43 -0
- infrahub/core/protocols.py +12 -0
- infrahub/core/query/attribute.py +32 -14
- infrahub/core/query/diff.py +11 -0
- infrahub/core/query/ipam.py +13 -7
- infrahub/core/query/node.py +51 -10
- infrahub/core/query/resource_manager.py +3 -3
- infrahub/core/schema/basenode_schema.py +8 -0
- infrahub/core/schema/definitions/core/__init__.py +10 -1
- infrahub/core/schema/definitions/core/ipam.py +28 -2
- infrahub/core/schema/definitions/core/propose_change.py +15 -0
- infrahub/core/schema/definitions/core/webhook.py +3 -0
- infrahub/core/schema/definitions/internal.py +1 -1
- infrahub/core/schema/generated/attribute_schema.py +1 -1
- infrahub/core/schema/generic_schema.py +10 -0
- infrahub/core/schema/manager.py +10 -1
- infrahub/core/schema/node_schema.py +22 -22
- infrahub/core/schema/profile_schema.py +8 -0
- infrahub/core/schema/schema_branch.py +11 -7
- infrahub/core/schema/template_schema.py +8 -0
- infrahub/core/validators/attribute/kind.py +5 -1
- infrahub/core/validators/checks_runner.py +5 -5
- infrahub/core/validators/determiner.py +22 -2
- infrahub/core/validators/tasks.py +6 -7
- infrahub/core/validators/uniqueness/checker.py +4 -2
- infrahub/core/validators/uniqueness/model.py +1 -0
- infrahub/core/validators/uniqueness/query.py +57 -7
- infrahub/database/__init__.py +2 -1
- infrahub/events/__init__.py +20 -0
- infrahub/events/constants.py +7 -0
- infrahub/events/generator.py +29 -2
- infrahub/events/proposed_change_action.py +203 -0
- infrahub/generators/tasks.py +24 -20
- infrahub/git/base.py +4 -7
- infrahub/git/integrator.py +21 -12
- infrahub/git/repository.py +15 -30
- infrahub/git/tasks.py +121 -106
- infrahub/graphql/app.py +2 -1
- infrahub/graphql/field_extractor.py +69 -0
- infrahub/graphql/manager.py +15 -11
- infrahub/graphql/mutations/account.py +2 -2
- infrahub/graphql/mutations/action.py +8 -2
- infrahub/graphql/mutations/artifact_definition.py +4 -1
- infrahub/graphql/mutations/branch.py +10 -5
- infrahub/graphql/mutations/graphql_query.py +2 -1
- infrahub/graphql/mutations/main.py +14 -8
- infrahub/graphql/mutations/menu.py +2 -1
- infrahub/graphql/mutations/proposed_change.py +230 -8
- infrahub/graphql/mutations/relationship.py +5 -0
- infrahub/graphql/mutations/repository.py +2 -1
- infrahub/graphql/mutations/tasks.py +7 -9
- infrahub/graphql/mutations/webhook.py +4 -1
- infrahub/graphql/parser.py +15 -6
- infrahub/graphql/queries/__init__.py +10 -1
- infrahub/graphql/queries/account.py +3 -3
- infrahub/graphql/queries/branch.py +2 -2
- infrahub/graphql/queries/diff/tree.py +56 -5
- infrahub/graphql/queries/event.py +13 -3
- infrahub/graphql/queries/ipam.py +23 -1
- infrahub/graphql/queries/proposed_change.py +84 -0
- infrahub/graphql/queries/relationship.py +2 -2
- infrahub/graphql/queries/resource_manager.py +3 -3
- infrahub/graphql/queries/search.py +3 -2
- infrahub/graphql/queries/status.py +3 -2
- infrahub/graphql/queries/task.py +2 -2
- infrahub/graphql/resolvers/ipam.py +440 -0
- infrahub/graphql/resolvers/many_relationship.py +4 -3
- infrahub/graphql/resolvers/resolver.py +5 -5
- infrahub/graphql/resolvers/single_relationship.py +3 -2
- infrahub/graphql/schema.py +25 -5
- infrahub/graphql/types/__init__.py +2 -2
- infrahub/graphql/types/attribute.py +3 -3
- infrahub/graphql/types/event.py +68 -0
- infrahub/groups/tasks.py +6 -6
- infrahub/lock.py +3 -2
- infrahub/menu/generator.py +8 -0
- infrahub/message_bus/operations/__init__.py +9 -12
- infrahub/message_bus/operations/git/file.py +6 -5
- infrahub/message_bus/operations/git/repository.py +12 -20
- infrahub/message_bus/operations/refresh/registry.py +15 -9
- infrahub/message_bus/operations/send/echo.py +7 -4
- infrahub/message_bus/types.py +1 -0
- infrahub/permissions/__init__.py +2 -1
- infrahub/permissions/constants.py +13 -0
- infrahub/permissions/globals.py +31 -2
- infrahub/permissions/manager.py +8 -5
- infrahub/pools/prefix.py +7 -5
- infrahub/prefect_server/app.py +31 -0
- infrahub/prefect_server/bootstrap.py +18 -0
- infrahub/proposed_change/action_checker.py +206 -0
- infrahub/proposed_change/approval_revoker.py +40 -0
- infrahub/proposed_change/branch_diff.py +3 -1
- infrahub/proposed_change/checker.py +45 -0
- infrahub/proposed_change/constants.py +32 -2
- infrahub/proposed_change/tasks.py +182 -150
- infrahub/py.typed +0 -0
- infrahub/server.py +29 -17
- infrahub/services/__init__.py +13 -28
- infrahub/services/adapters/cache/__init__.py +4 -0
- infrahub/services/adapters/cache/nats.py +2 -0
- infrahub/services/adapters/cache/redis.py +3 -0
- infrahub/services/adapters/message_bus/__init__.py +0 -2
- infrahub/services/adapters/message_bus/local.py +1 -2
- infrahub/services/adapters/message_bus/nats.py +6 -8
- infrahub/services/adapters/message_bus/rabbitmq.py +7 -9
- infrahub/services/adapters/workflow/__init__.py +1 -0
- infrahub/services/adapters/workflow/local.py +1 -8
- infrahub/services/component.py +2 -1
- infrahub/task_manager/event.py +56 -0
- infrahub/task_manager/models.py +9 -0
- infrahub/tasks/artifact.py +6 -7
- infrahub/tasks/check.py +4 -7
- infrahub/telemetry/tasks.py +15 -18
- infrahub/transformations/tasks.py +10 -6
- infrahub/trigger/tasks.py +4 -3
- infrahub/types.py +4 -0
- infrahub/validators/events.py +7 -7
- infrahub/validators/tasks.py +6 -7
- infrahub/webhook/models.py +45 -45
- infrahub/webhook/tasks.py +25 -24
- infrahub/workers/dependencies.py +143 -0
- infrahub/workers/infrahub_async.py +19 -43
- infrahub/workflows/catalogue.py +16 -2
- infrahub/workflows/initialization.py +5 -4
- infrahub/workflows/models.py +2 -0
- infrahub_sdk/client.py +2 -2
- infrahub_sdk/ctl/repository.py +51 -0
- infrahub_sdk/ctl/schema.py +9 -9
- infrahub_sdk/node/node.py +2 -2
- infrahub_sdk/pytest_plugin/items/graphql_query.py +1 -1
- infrahub_sdk/schema/repository.py +1 -1
- infrahub_sdk/testing/docker.py +1 -1
- infrahub_sdk/utils.py +2 -2
- {infrahub_server-1.3.7.dist-info → infrahub_server-1.4.0.dist-info}/METADATA +7 -5
- {infrahub_server-1.3.7.dist-info → infrahub_server-1.4.0.dist-info}/RECORD +174 -158
- infrahub_testcontainers/container.py +17 -0
- infrahub_testcontainers/docker-compose-cluster.test.yml +56 -1
- infrahub_testcontainers/docker-compose.test.yml +56 -1
- infrahub_testcontainers/helpers.py +4 -1
- {infrahub_server-1.3.7.dist-info → infrahub_server-1.4.0.dist-info}/LICENSE.txt +0 -0
- {infrahub_server-1.3.7.dist-info → infrahub_server-1.4.0.dist-info}/WHEEL +0 -0
- {infrahub_server-1.3.7.dist-info → infrahub_server-1.4.0.dist-info}/entry_points.txt +0 -0
infrahub/graphql/resolvers/ipam.py
ADDED

@@ -0,0 +1,440 @@
from __future__ import annotations

import ipaddress
from typing import TYPE_CHECKING, Any

from graphql.type.definition import GraphQLNonNull
from netaddr import IPSet
from opentelemetry import trace

from infrahub.core import registry
from infrahub.core.constants import InfrahubKind
from infrahub.core.ipam.constants import PrefixMemberType
from infrahub.core.manager import NodeManager
from infrahub.core.node import Node
from infrahub.core.protocols import BuiltinIPNamespace, BuiltinIPPrefix
from infrahub.core.schema.generic_schema import GenericSchema
from infrahub.exceptions import ValidationError
from infrahub.graphql.parser import extract_selection
from infrahub.graphql.permissions import get_permissions

from ..models import OrderModel

if TYPE_CHECKING:
    from collections.abc import Sequence

    from graphql import GraphQLResolveInfo
    from pydantic import IPvAnyAddress, IPvAnyInterface, IPvAnyNetwork

    from infrahub.core.branch.models import Branch
    from infrahub.core.schema import NodeSchema
    from infrahub.database import InfrahubDatabase
    from infrahub.graphql.initialization import GraphqlContext
    from infrahub.graphql.models import OrderModel


def _ip_range_display_label(node: Node) -> str:
    """Return a human friendly summary of an IP range"""
    size = int(node.last_address.obj) - int(node.address.obj) + 1

    if size == 1:
        return "1 IP address available"
    if size <= 2**16:
        return f"{size} IP addresses available"
    return f"More than {2**16} IP addresses available"


def _ip_with_prefix_length(ip_address: IPvAnyAddress, ip_prefix: IPvAnyNetwork) -> IPvAnyInterface:
    """Convert an `IPAddress` object into an `IPInterface` one given a `IPNetwork`."""
    return ipaddress.ip_interface(f"{ip_address}/{ip_prefix.prefixlen}")


async def _build_ip_range_node(
    db: InfrahubDatabase,
    branch: Branch,
    schema: NodeSchema,
    address: IPvAnyAddress,
    last_address: IPvAnyAddress,
    ip_namespace: BuiltinIPNamespace,
    ip_prefix: BuiltinIPPrefix,
) -> Node:
    address_with_len = str(_ip_with_prefix_length(ip_address=address, ip_prefix=ip_prefix.prefix.obj))
    last_address_with_len = str(_ip_with_prefix_length(ip_address=last_address, ip_prefix=ip_prefix.prefix.obj))

    n = await Node.init(schema=schema, db=db, branch=branch)
    await n.new(
        db=db,
        address=address_with_len,
        last_address=last_address_with_len,
        description=f"Available IP range {address_with_len} - {last_address_with_len}",
        ip_namespace=ip_namespace,
        ip_prefix=ip_prefix,
    )
    return n


def _include_first_and_last_ips(ip_prefix: BuiltinIPPrefix) -> bool:
    if ip_prefix.prefix.version == 6 or ip_prefix.is_pool.value:
        return True

    return ip_prefix.member_type.value == PrefixMemberType.ADDRESS.value and ip_prefix.prefix.prefixlen == 31


async def _resolve_available_address_nodes(
    db: InfrahubDatabase,
    branch: Branch,
    prefix: BuiltinIPPrefix,
    existing_nodes: Sequence[Node],
    first_node_context: Node | None = None,
    last_node_context: Node | None = None,
) -> list[Node]:
    """Annotate a list of IP addresses node with available ranges within a prefix."""
    ip_prefix: IPvAnyNetwork = prefix.prefix.obj
    ip_namespace = await prefix.ip_namespace.get_peer(db=db, peer_type=BuiltinIPNamespace, raise_on_error=True)
    ip_range_schema = registry.get_node_schema(name=InfrahubKind.IPRANGEAVAILABLE, branch=branch)

    # Make sure nodes are ordered by addresses
    sorted_nodes = sorted(existing_nodes, key=lambda n: n.address.obj)
    prefix_first_address = (
        ip_prefix.network_address if _include_first_and_last_ips(ip_prefix=prefix) else ip_prefix.network_address + 1
    )
    prefix_last_address = (
        ip_prefix.broadcast_address
        if _include_first_and_last_ips(ip_prefix=prefix)
        else ip_prefix.broadcast_address - 1
    )

    if not sorted_nodes:
        return [
            await _build_ip_range_node(
                db=db,
                branch=branch,
                schema=ip_range_schema,
                address=prefix_first_address,
                last_address=prefix_last_address,
                ip_namespace=ip_namespace,
                ip_prefix=prefix,
            )
        ]

    first_address: IPvAnyAddress = prefix_first_address
    last_address: IPvAnyAddress = prefix_last_address

    # Use but exclude context addresses to avoid having them in the result
    if first_node_context:
        first_address = first_node_context.address.obj.ip + 1
    if last_node_context:
        last_address = last_node_context.address.obj.ip - 1

    with_available_ranges: list[Node] = []
    previous_address: IPvAnyAddress | None = None

    # Look for a gap at the beginning of the prefix
    if sorted_nodes[0].address.obj.ip > first_address:
        with_available_ranges.append(
            await _build_ip_range_node(
                db=db,
                branch=branch,
                schema=ip_range_schema,
                address=first_address,
                last_address=sorted_nodes[0].address.obj.ip - 1,
                ip_namespace=ip_namespace,
                ip_prefix=prefix,
            )
        )

    # Look for gaps between existing addresses
    for existing in sorted_nodes:
        current = existing.address.obj.ip
        if previous_address:
            if int(current) - int(previous_address) > 1:
                with_available_ranges.append(
                    await _build_ip_range_node(
                        db=db,
                        branch=branch,
                        schema=ip_range_schema,
                        address=previous_address + 1,
                        last_address=current - 1,
                        ip_namespace=ip_namespace,
                        ip_prefix=prefix,
                    )
                )

        with_available_ranges.append(existing)
        previous_address = existing.address.obj.ip

    # Look for a gap at the end of the prefix
    if previous_address and previous_address < last_address:
        with_available_ranges.append(
            await _build_ip_range_node(
                db=db,
                branch=branch,
                schema=ip_range_schema,
                address=previous_address + 1,
                last_address=last_address,
                ip_namespace=ip_namespace,
                ip_prefix=prefix,
            )
        )

    return with_available_ranges


async def _resolve_available_prefix_nodes(
    db: InfrahubDatabase,
    branch: Branch,
    prefix: BuiltinIPPrefix,
    existing_nodes: Sequence[Node],
    first_node_context: Node | None = None,
    last_node_context: Node | None = None,
) -> list[Node]:
    """Annotate a list of IP prefixes node with available prefixes within a parent one."""
    ip_prefix_schema = registry.get_node_schema(name=InfrahubKind.IPPREFIXAVAILABLE, branch=branch)

    existing_prefixes = IPSet([n.prefix.value for n in existing_nodes])
    if first_node_context:
        existing_prefixes.add(first_node_context.prefix.value)
    if last_node_context:
        existing_prefixes.add(last_node_context.prefix.value)

    # Infer which prefixes are actually available
    available_prefixes = IPSet([prefix.prefix.value]) ^ existing_prefixes
    available_nodes: list[Node] = []

    # Turn them into nodes (without saving them in the database)
    for available_prefix in available_prefixes.iter_cidrs():
        p = ipaddress.ip_network(str(available_prefix))
        if (first_node_context and p < first_node_context.prefix.obj) or (
            last_node_context and p > last_node_context.prefix.obj
        ):
            continue

        node = await Node.init(schema=ip_prefix_schema, db=db, branch=branch)
        await node.new(
            db=db, prefix=str(available_prefix), ip_namespace=await prefix.ip_namespace.get_peer(db=db), parent=prefix
        )
        available_nodes.append(node)

    # Properly sort existing nodes with available prefixes
    with_available_prefixes = sorted(existing_nodes + available_nodes, key=lambda n: n.prefix.obj)

    if len(with_available_prefixes) > 1 or with_available_prefixes[0].prefix.obj != prefix.prefix.obj:
        return with_available_prefixes

    # If the only available prefix is the same as the container prefix, this means the container prefix is empty and we should therefore at least
    # offer two smaller prefixes allocatable within it
    available_nodes.clear()

    for subnet in prefix.prefix.obj.subnets():
        node = await Node.init(schema=ip_prefix_schema, db=db, branch=branch)
        await node.new(db=db, prefix=str(subnet), ip_namespace=await prefix.ip_namespace.get_peer(db=db), parent=prefix)
        available_nodes.append(node)

    return available_nodes


def _filter_kinds(nodes: list[Node], kinds: list[str], limit: int | None) -> list[Node]:
    filtered: list[Node] = []
    available_node_kinds = [InfrahubKind.IPPREFIXAVAILABLE, InfrahubKind.IPRANGEAVAILABLE]
    kinds_with_available = kinds + available_node_kinds

    limit_with_available = limit
    for node in nodes:
        if node.get_schema().kind not in kinds_with_available:
            continue
        # Adapt the limit of nodes to return by always including available ones
        if limit and node.get_schema().kind in available_node_kinds:
            limit_with_available += 1
        filtered.append(node)

    return filtered[:limit_with_available] if limit else filtered


async def _annotate_result(
    db: InfrahubDatabase,
    branch: Branch,
    resolve_available: bool,
    schema: NodeSchema | GenericSchema,
    parent_prefix: BuiltinIPPrefix | None,
    result: list[Node],
    first_node_context: Node | None = None,
    last_node_context: Node | None = None,
    kinds_to_filter: list[str] | None = None,
    limit: int | None = None,
) -> list[Node]:
    nodes: list[Node] = result

    if resolve_available and parent_prefix:
        if schema.is_ip_address:
            nodes = await _resolve_available_address_nodes(
                db=db,
                branch=branch,
                prefix=parent_prefix,
                existing_nodes=result,
                first_node_context=first_node_context,
                last_node_context=last_node_context,
            )
        else:
            nodes = await _resolve_available_prefix_nodes(
                db=db,
                branch=branch,
                prefix=parent_prefix,
                existing_nodes=result,
                first_node_context=first_node_context,
                last_node_context=last_node_context,
            )

    return _filter_kinds(nodes=nodes, kinds=kinds_to_filter, limit=limit) if kinds_to_filter else nodes


@trace.get_tracer(__name__).start_as_current_span("ipam_paginated_list_resolver")
async def ipam_paginated_list_resolver(  # noqa: PLR0915
    root: dict,  # noqa: ARG001
    info: GraphQLResolveInfo,
    offset: int | None = None,
    limit: int | None = None,
    order: OrderModel | None = None,
    partial_match: bool = False,
    **kwargs: dict[str, Any],
) -> dict[str, Any]:
    schema: NodeSchema | GenericSchema = (
        info.return_type.of_type.graphene_type._meta.schema
        if isinstance(info.return_type, GraphQLNonNull)
        else info.return_type.graphene_type._meta.schema
    )

    if not isinstance(schema, GenericSchema) or schema.kind not in [InfrahubKind.IPADDRESS, InfrahubKind.IPPREFIX]:
        raise ValidationError(f"{schema.kind} is not {InfrahubKind.IPADDRESS} or {InfrahubKind.IPPREFIX}")

    fields = await extract_selection(info=info, schema=schema)
    resolve_available = bool(kwargs.pop("include_available", False))
    kinds_to_filter: list[str] = kwargs.pop("kinds", [])  # type: ignore[assignment]

    for kind in kinds_to_filter:
        if kind not in schema.used_by:
            raise ValidationError(f"{kind} is not a node inheriting from {schema.kind}")

    graphql_context: GraphqlContext = info.context
    async with graphql_context.db.start_session(read_only=True) as db:
        response: dict[str, Any] = {"edges": []}
        filters = {
            key: value for key, value in kwargs.items() if ("__" in key and value is not None) or key in ("ids", "hfid")
        }

        edges = fields.get("edges", {})
        node_fields = edges.get("node", {})

        permission_set: dict[str, Any] | None = None
        permissions = (
            await get_permissions(schema=schema, graphql_context=graphql_context)
            if graphql_context.permissions
            else None
        )
        if fields.get("permissions"):
            response["permissions"] = permissions

        if permissions:
            for edge in permissions["edges"]:
                if edge["node"]["kind"] == schema.kind:
                    permission_set = edge["node"]

        parent_prefix_id = ""
        if schema.is_ip_address and "ip_prefix__ids" in filters:
            parent_prefix_id = next(iter(filters["ip_prefix__ids"]))
        if schema.is_ip_prefix and "parent__ids" in filters:
            parent_prefix_id = next(iter(filters["parent__ids"]))

        parent_prefix: BuiltinIPPrefix | None = None
        if parent_prefix_id:
            parent_prefix = await NodeManager.get_one(
                db=db, kind=BuiltinIPPrefix, id=parent_prefix_id, at=graphql_context.at, branch=graphql_context.branch
            )

        first_node_context: Node | None = None
        fetch_first_node_context = False
        if offset is not None and offset > 0:
            offset -= 1
            fetch_first_node_context = True

        last_node_context: Node | None = None
        fetch_last_node_context = False
        if limit is not None and limit > 0:
            limit += 1
            fetch_last_node_context = True

        # Since we are going to narrow down the number of nodes in the end, we will query for a larger set (that can potentially include all kinds of
        # implementations) in the first place to make sure that we will fill in the page to its maximum
        query_limit = limit
        if kinds_to_filter and limit:
            query_limit *= len(schema.used_by)

        objs = []
        if edges or "hfid" in filters:
            objs = await NodeManager.query(
                db=db,
                schema=schema,
                filters=filters or None,
                fields=node_fields,
                at=graphql_context.at,
                branch=graphql_context.branch,
                limit=query_limit,
                offset=offset,
                account=graphql_context.account_session,
                include_source=True,
                include_owner=True,
                partial_match=partial_match,
                order=order,
            )

        if fetch_first_node_context and len(objs) > 2:
            first_node_context = objs[0]
            objs = objs[1:]
        if fetch_last_node_context and len(objs) >= limit >= 2:
            last_node_context = objs[-1]
            objs = objs[:-1]

        if "count" in fields:
            if filters.get("hfid"):
                response["count"] = len(objs)
            else:
                response["count"] = await NodeManager.count(
                    db=db,
                    schema=schema,
                    filters=filters,
                    at=graphql_context.at,
                    branch=graphql_context.branch,
                    partial_match=partial_match,
                )

        result = await _annotate_result(
            db=db,
            branch=graphql_context.branch,
            resolve_available=resolve_available,
            schema=schema,
            parent_prefix=parent_prefix,
            result=objs,
            first_node_context=first_node_context,
            last_node_context=last_node_context,
            kinds_to_filter=kinds_to_filter,
            limit=limit,
        )

        if result:
            objects = []
            for obj in result:
                obj_data = await obj.to_graphql(
                    db=db,
                    fields=node_fields,
                    related_node_ids=graphql_context.related_node_ids,
                    permissions=permission_set,
                )

                # Override display label for available IP ranges
                if obj.get_schema().kind == InfrahubKind.IPRANGEAVAILABLE and "display_label" in obj_data:
                    obj_data["display_label"] = _ip_range_display_label(node=obj)

                objects.append({"node": obj_data})

            response["edges"] = objects

    return response
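
The _resolve_available_prefix_nodes helper above derives free space by taking the symmetric difference between the container prefix and the set of already-allocated child prefixes, then collapsing the remainder into CIDRs. A minimal standalone sketch of that idea using netaddr only (the prefix values are illustrative, not taken from this release):

from netaddr import IPSet

# Container prefix and the child prefixes already recorded in the database.
container = IPSet(["10.0.0.0/24"])
existing = IPSet(["10.0.0.0/26", "10.0.0.128/26"])

# Symmetric difference keeps the space covered by the container but not by
# any existing child prefix; this mirrors the operation used in the resolver.
available = container ^ existing

# iter_cidrs() collapses the remaining space into the largest possible CIDRs.
for cidr in available.iter_cidrs():
    print(cidr)  # -> 10.0.0.64/26, 10.0.0.192/26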
infrahub/graphql/resolvers/many_relationship.py
CHANGED

@@ -1,7 +1,7 @@
 from typing import TYPE_CHECKING, Any

 from graphql import GraphQLResolveInfo
-from infrahub_sdk.utils import deep_merge_dict
+from infrahub_sdk.utils import deep_merge_dict

 from infrahub.core.branch.models import Branch
 from infrahub.core.constants import BranchSupportType, RelationshipHierarchyDirection
@@ -11,6 +11,7 @@ from infrahub.core.schema.node_schema import NodeSchema
 from infrahub.core.schema.relationship_schema import RelationshipSchema
 from infrahub.core.timestamp import Timestamp
 from infrahub.database import InfrahubDatabase
+from infrahub.graphql.field_extractor import extract_graphql_fields

 from ..loaders.peers import PeerRelationshipsDataLoader, QueryPeerParams
 from ..types import RELATIONS_PROPERTY_MAP, RELATIONS_PROPERTY_MAP_REVERSED
@@ -81,14 +82,14 @@ class ManyRelationshipResolver:
         This resolver is used for paginated responses and as such we redefined the requested
         fields by only reusing information below the 'node' key.
         """
-        # Extract the
+        # Extract the Infrahub schema by inspecting the GQL Schema

         node_schema: MainSchemaTypes = info.parent_type.graphene_type._meta.schema  # type: ignore[attr-defined]

         graphql_context: GraphqlContext = info.context

         # Extract the name of the fields in the GQL query
-        fields =
+        fields = extract_graphql_fields(info=info)
         edges = fields.get("edges", {})
         node_fields = edges.get("node", {})
         property_fields = edges.get("properties", {})
infrahub/graphql/resolvers/resolver.py
CHANGED

@@ -3,12 +3,12 @@ from __future__ import annotations
 from typing import TYPE_CHECKING, Any

 from graphql.type.definition import GraphQLNonNull
-from infrahub_sdk.utils import extract_fields
 from opentelemetry import trace

 from infrahub.core.constants import BranchSupportType, InfrahubKind, RelationshipHierarchyDirection
 from infrahub.core.manager import NodeManager
 from infrahub.exceptions import NodeNotFoundError
+from infrahub.graphql.field_extractor import extract_graphql_fields

 from ..models import OrderModel
 from ..parser import extract_selection
@@ -26,7 +26,7 @@ async def account_resolver(
     root: dict,  # noqa: ARG001
     info: GraphQLResolveInfo,
 ) -> dict:
-    fields =
+    fields = extract_graphql_fields(info=info)
     graphql_context: GraphqlContext = info.context

     async with graphql_context.db.start_session(read_only=True) as db:
@@ -90,7 +90,7 @@ async def default_resolver(*args: Any, **kwargs) -> dict | list[dict] | None:
     graphql_context: GraphqlContext = info.context

     # Extract the name of the fields in the GQL query
-    fields =
+    fields = extract_graphql_fields(info=info)

     # Extract the schema of the node on the other end of the relationship from the GQL Schema
     node_rel = node_schema.get_relationship(info.field_name)
@@ -155,7 +155,7 @@ async def default_paginated_list_resolver(
         else info.return_type.graphene_type._meta.schema
     )

-    fields = await extract_selection(info
+    fields = await extract_selection(info=info, schema=schema)

     graphql_context: GraphqlContext = info.context
     async with graphql_context.db.start_session(read_only=True) as db:
@@ -277,7 +277,7 @@ async def hierarchy_resolver(
     graphql_context: GraphqlContext = info.context

     # Extract the name of the fields in the GQL query
-    fields =
+    fields = extract_graphql_fields(info=info)
     edges = fields.get("edges", {})
     node_fields = edges.get("node", {})

infrahub/graphql/resolvers/single_relationship.py
CHANGED

@@ -2,7 +2,7 @@ from typing import TYPE_CHECKING, Any

 from graphql import GraphQLResolveInfo
 from graphql.type.definition import GraphQLNonNull
-from infrahub_sdk.utils import deep_merge_dict
+from infrahub_sdk.utils import deep_merge_dict

 from infrahub.core.branch.models import Branch
 from infrahub.core.constants import BranchSupportType
@@ -10,6 +10,7 @@ from infrahub.core.manager import NodeManager
 from infrahub.core.schema.relationship_schema import RelationshipSchema
 from infrahub.core.timestamp import Timestamp
 from infrahub.database import InfrahubDatabase
+from infrahub.graphql.field_extractor import extract_graphql_fields

 from ..loaders.node import GetManyParams, NodeDataLoader
 from ..types import RELATIONS_PROPERTY_MAP, RELATIONS_PROPERTY_MAP_REVERSED
@@ -42,7 +43,7 @@ class SingleRelationshipResolver:
         graphql_context: GraphqlContext = info.context

         # Extract the name of the fields in the GQL query
-        fields =
+        fields = extract_graphql_fields(info=info)
         node_fields = fields.get("node", {})
         property_fields = fields.get("properties", {})
         for key, value in property_fields.items():
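
The three resolver modules above switch field extraction to extract_graphql_fields from the new infrahub/graphql/field_extractor.py (also added in this release, +69 lines). Its implementation is not shown in this diff; as a rough, assumed sketch, a helper of this kind typically walks the resolver's selection set into a nested dict of requested field names:

from graphql import FieldNode, GraphQLResolveInfo


def extract_fields_sketch(info: GraphQLResolveInfo) -> dict:
    """Hypothetical stand-in for extract_graphql_fields: nested dict of selected fields."""

    def walk(node: FieldNode) -> dict | None:
        if not node.selection_set:
            return None  # leaf field
        return {
            selection.name.value: walk(selection)
            for selection in node.selection_set.selections
            if isinstance(selection, FieldNode)  # fragments are ignored in this sketch
        }

    fields: dict = {}
    for field_node in info.field_nodes:
        fields.update(walk(field_node) or {})
    return fields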
infrahub/graphql/schema.py
CHANGED

@@ -20,7 +20,12 @@ from .mutations.convert_object_type import ConvertObjectType
 from .mutations.diff import DiffUpdateMutation
 from .mutations.diff_conflict import ResolveDiffConflict
 from .mutations.generator import GeneratorDefinitionRequestRun
-from .mutations.proposed_change import
+from .mutations.proposed_change import (
+    ProposedChangeCheckForApprovalRevoke,
+    ProposedChangeMerge,
+    ProposedChangeRequestRunCheck,
+    ProposedChangeReview,
+)
 from .mutations.relationship import RelationshipAdd, RelationshipRemove
 from .mutations.repository import ProcessRepository, ValidateRepositoryConnectivity
 from .mutations.resource_manager import IPAddressPoolGetResource, IPPrefixPoolGetResource
@@ -29,6 +34,8 @@ from .queries import (
     AccountPermissions,
     AccountToken,
     BranchQueryList,
+    DeprecatedIPAddressGetNextAvailable,
+    DeprecatedIPPrefixGetNextAvailable,
     InfrahubInfo,
     InfrahubIPAddressGetNextAvailable,
     InfrahubIPPrefixGetNextAvailable,
@@ -36,6 +43,7 @@ from .queries import (
     InfrahubResourcePoolUtilization,
     InfrahubSearchAnywhere,
     InfrahubStatus,
+    ProposedChangeAvailableActions,
     Relationship,
 )
 from .queries.convert_object_type_mapping import FieldsMappingTypeConversion
@@ -63,8 +71,12 @@ class InfrahubBaseQuery(ObjectType):
     InfrahubEvent = Event
     InfrahubTaskBranchStatus = TaskBranchStatus

-
-
+    CoreProposedChangeAvailableActions = ProposedChangeAvailableActions
+
+    IPAddressGetNextAvailable = DeprecatedIPAddressGetNextAvailable
+    IPPrefixGetNextAvailable = DeprecatedIPPrefixGetNextAvailable
+    InfrahubIPAddressGetNextAvailable = InfrahubIPAddressGetNextAvailable
+    InfrahubIPPrefixGetNextAvailable = InfrahubIPPrefixGetNextAvailable
     InfrahubResourcePoolAllocated = InfrahubResourcePoolAllocated
     InfrahubResourcePoolUtilization = InfrahubResourcePoolUtilization

@@ -77,10 +89,17 @@ class InfrahubBaseMutation(ObjectType):
     InfrahubAccountTokenDelete = InfrahubAccountTokenDelete.Field()
     CoreProposedChangeRunCheck = ProposedChangeRequestRunCheck.Field()
     CoreProposedChangeMerge = ProposedChangeMerge.Field()
+    CoreProposedChangeReview = ProposedChangeReview.Field()
     CoreGeneratorDefinitionRun = GeneratorDefinitionRequestRun.Field()

-
-
+    InfrahubIPPrefixPoolGetResource = IPPrefixPoolGetResource.Field()
+    InfrahubIPAddressPoolGetResource = IPAddressPoolGetResource.Field()
+    IPPrefixPoolGetResource = IPPrefixPoolGetResource.Field(
+        deprecation_reason="This mutation has been renamed to 'InfrahubIPPrefixPoolGetResource'. It will be removed in the next version of Infrahub."
+    )
+    IPAddressPoolGetResource = IPAddressPoolGetResource.Field(
+        deprecation_reason="This mutation has been renamed to 'InfrahubIPAddressPoolGetResource'. It will be removed in the next version of Infrahub."
+    )

     BranchCreate = BranchCreate.Field()
     BranchDelete = BranchDelete.Field()
@@ -104,3 +123,4 @@ class InfrahubBaseMutation(ObjectType):
     ResolveDiffConflict = ResolveDiffConflict.Field()

     ConvertObjectType = ConvertObjectType.Field()
+    CoreProposedChangeCheckForApprovalRevoke = ProposedChangeCheckForApprovalRevoke.Field()
infrahub/graphql/types/__init__.py
CHANGED

@@ -16,8 +16,8 @@ from .attribute import (
     MacAddressType,
     NumberAttributeType,
     RelatedIPAddressNodeInput,
+    RelatedIPPrefixNodeInput,
     RelatedNodeInput,
-    RelatedPrefixNodeInput,
     StrAttributeType,
     TextAttributeType,
 )
@@ -51,8 +51,8 @@ __all__ = [
     "NumberAttributeType",
     "PaginatedObjectPermission",
     "RelatedIPAddressNodeInput",
+    "RelatedIPPrefixNodeInput",
     "RelatedNodeInput",
-    "RelatedPrefixNodeInput",
     "RelationshipNode",
     "StrAttributeType",
     "TaskLog",
infrahub/graphql/types/attribute.py
CHANGED

@@ -32,7 +32,7 @@ class IPAddressPoolInput(GenericPoolInput):
     prefixlen = Int(required=False)


-class
+class IPPrefixPoolInput(GenericPoolInput):
     size = Int(required=False)
     member_type = String(required=False)
     prefix_type = String(required=False)
@@ -47,10 +47,10 @@ class RelatedIPAddressNodeInput(InputObjectType):
     _relation__source = String(required=False)


-class
+class RelatedIPPrefixNodeInput(InputObjectType):
     id = String(required=False)
     hfid = Field(List(of_type=String), required=False)
-    from_pool = Field(
+    from_pool = Field(IPPrefixPoolInput, required=False)
     _relation__is_visible = Boolean(required=False)
     _relation__is_protected = Boolean(required=False)
     _relation__owner = String(required=False)