pydantic-fixturegen 1.0.0__py3-none-any.whl → 1.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pydantic-fixturegen might be problematic. Click here for more details.
- pydantic_fixturegen/api/__init__.py +137 -0
- pydantic_fixturegen/api/_runtime.py +726 -0
- pydantic_fixturegen/api/models.py +73 -0
- pydantic_fixturegen/cli/__init__.py +32 -1
- pydantic_fixturegen/cli/check.py +230 -0
- pydantic_fixturegen/cli/diff.py +992 -0
- pydantic_fixturegen/cli/doctor.py +188 -35
- pydantic_fixturegen/cli/gen/_common.py +134 -7
- pydantic_fixturegen/cli/gen/explain.py +597 -40
- pydantic_fixturegen/cli/gen/fixtures.py +244 -112
- pydantic_fixturegen/cli/gen/json.py +229 -138
- pydantic_fixturegen/cli/gen/schema.py +170 -85
- pydantic_fixturegen/cli/init.py +333 -0
- pydantic_fixturegen/cli/schema.py +45 -0
- pydantic_fixturegen/cli/watch.py +126 -0
- pydantic_fixturegen/core/config.py +137 -3
- pydantic_fixturegen/core/config_schema.py +178 -0
- pydantic_fixturegen/core/constraint_report.py +305 -0
- pydantic_fixturegen/core/errors.py +42 -0
- pydantic_fixturegen/core/field_policies.py +100 -0
- pydantic_fixturegen/core/generate.py +241 -37
- pydantic_fixturegen/core/io_utils.py +10 -2
- pydantic_fixturegen/core/path_template.py +197 -0
- pydantic_fixturegen/core/presets.py +73 -0
- pydantic_fixturegen/core/providers/temporal.py +10 -0
- pydantic_fixturegen/core/safe_import.py +146 -12
- pydantic_fixturegen/core/seed_freeze.py +176 -0
- pydantic_fixturegen/emitters/json_out.py +65 -16
- pydantic_fixturegen/emitters/pytest_codegen.py +68 -13
- pydantic_fixturegen/emitters/schema_out.py +27 -3
- pydantic_fixturegen/logging.py +114 -0
- pydantic_fixturegen/schemas/config.schema.json +244 -0
- pydantic_fixturegen-1.1.0.dist-info/METADATA +173 -0
- pydantic_fixturegen-1.1.0.dist-info/RECORD +57 -0
- pydantic_fixturegen-1.0.0.dist-info/METADATA +0 -280
- pydantic_fixturegen-1.0.0.dist-info/RECORD +0 -41
- {pydantic_fixturegen-1.0.0.dist-info → pydantic_fixturegen-1.1.0.dist-info}/WHEEL +0 -0
- {pydantic_fixturegen-1.0.0.dist-info → pydantic_fixturegen-1.1.0.dist-info}/entry_points.txt +0 -0
- {pydantic_fixturegen-1.0.0.dist-info → pydantic_fixturegen-1.1.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,305 @@
|
|
|
1
|
+
"""Constraint enforcement reporting utilities."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from collections.abc import Iterable, Mapping
|
|
6
|
+
from dataclasses import dataclass, field
|
|
7
|
+
from typing import Any
|
|
8
|
+
|
|
9
|
+
from pydantic import BaseModel
|
|
10
|
+
|
|
11
|
+
from .schema import FieldSummary
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
@dataclass(slots=True)
class FieldFailure:
    """One validation failure attributed to a single model field."""

    # Stringified error location path, e.g. ("items", "0", "name").
    location: tuple[str, ...]
    # Human-readable validation message (taken from the error mapping's "msg").
    message: str
    # Error "type" identifier from the error mapping, when available.
    error_type: str | None
    # The generated value that failed validation; None when it could not
    # be recovered from the recorded field values.
    value: Any
    # Actionable remediation hint shown to the user.
    hint: str
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
@dataclass(slots=True)
class FieldStats:
    """Per-field counters for constraint enforcement."""

    # Snapshot of the field's constraints, or None when none are known.
    constraints: dict[str, Any] | None
    # Number of generation attempts counted for this (constrained) field.
    attempts: int = 0
    # Number of attempts that ended in a successful model validation.
    successes: int = 0
    # Individual validation failures recorded against this field.
    failures: list[FieldFailure] = field(default_factory=list)
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
@dataclass(slots=True)
class ModelStats:
    """Per-model counters plus the stats of each of its fields."""

    # Number of generation attempts started for this model.
    attempts: int = 0
    # Number of attempts that validated successfully.
    successes: int = 0
    # Field-level stats keyed by field name.
    fields: dict[str, FieldStats] = field(default_factory=dict)
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
@dataclass(slots=True)
class _AttemptContext:
    """Mutable bookkeeping for one in-flight model generation attempt."""

    # Identity key of the model being generated ("module.QualName").
    model_key: str
    # Field summaries observed during this attempt, keyed by field name.
    field_summaries: dict[str, FieldSummary] = field(default_factory=dict)
    # Values generated for each field during this attempt.
    field_values: dict[str, Any] = field(default_factory=dict)
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
class ConstraintReporter:
    """Collect constraint enforcement metrics during generation.

    Usage: call ``begin_model`` before generating an instance, record each
    field via ``record_field_attempt`` / ``record_field_value``, then close
    the attempt with ``finish_model``. A stack of attempt contexts keeps
    nested model generation attributed to the correct model.
    """

    def __init__(self) -> None:
        # Aggregated stats per model key ("module.QualName").
        self._models: dict[str, ModelStats] = {}
        # Stack of in-flight attempts; the top entry is the active one.
        self._stack: list[_AttemptContext] = []

    @staticmethod
    def _model_key(model: type[BaseModel]) -> str:
        """Return the stable identity key used to aggregate *model* stats."""
        return f"{model.__module__}.{model.__qualname__}"

    @staticmethod
    def _field_stats(
        stats: ModelStats,
        field_name: str,
        summary: FieldSummary | None,
    ) -> FieldStats:
        """Fetch or create the ``FieldStats`` entry for *field_name*.

        Backfills a missing constraints snapshot when the entry was first
        created without one (e.g. a field first seen via a validation
        failure before its summary was recorded), mirroring the explicit
        backfill performed by ``merge_from``.
        """
        snapshot = _constraints_snapshot(summary)
        field_stats = stats.fields.setdefault(
            field_name,
            FieldStats(constraints=snapshot),
        )
        if field_stats.constraints is None and snapshot is not None:
            field_stats.constraints = snapshot
        return field_stats

    def begin_model(self, model: type[BaseModel]) -> None:
        """Open an attempt context for *model* and count the attempt."""
        key = self._model_key(model)
        stats = self._models.setdefault(key, ModelStats())
        stats.attempts += 1
        self._stack.append(_AttemptContext(model_key=key))

    def record_field_attempt(
        self,
        model: type[BaseModel],
        field_name: str,
        summary: FieldSummary,
    ) -> None:
        """Record that a value is being generated for *field_name*."""
        if not self._stack:
            # No begin_model() is active; nothing to attribute the field to.
            return
        ctx = self._stack[-1]
        ctx.field_summaries[field_name] = summary
        stats = self._models.setdefault(ctx.model_key, ModelStats())
        field_stats = self._field_stats(stats, field_name, summary)
        # Only constrained fields participate in attempt/success counting.
        if summary.constraints.has_constraints():
            field_stats.attempts += 1

    def record_field_value(self, field_name: str, value: Any) -> None:
        """Remember the generated value so failures can report it later."""
        if not self._stack:
            return
        ctx = self._stack[-1]
        ctx.field_values[field_name] = value

    def finish_model(
        self,
        model: type[BaseModel],
        *,
        success: bool,
        errors: Iterable[Mapping[str, Any]] | None = None,
    ) -> None:
        """Close the current attempt, tallying success or failures.

        *errors* is expected to be an iterable of error mappings carrying
        "loc", "msg" and "type" keys (the shape produced by pydantic's
        ``ValidationError.errors()``).
        """
        if not self._stack:
            return
        ctx = self._stack.pop()
        stats = self._models.setdefault(ctx.model_key, ModelStats())
        if success:
            stats.successes += 1
            for field_name, summary in ctx.field_summaries.items():
                if summary.constraints.has_constraints():
                    field_stats = self._field_stats(stats, field_name, summary)
                    field_stats.successes += 1
            return

        if errors:
            self._record_failures(stats, ctx, errors)

    def summary(self) -> dict[str, Any]:
        """Return a JSON-serializable summary of the collected metrics."""
        models_summary: list[dict[str, Any]] = []
        total_failures = 0
        for model_key, stats in self._models.items():
            field_entries: list[dict[str, Any]] = []
            for field_name, field_stats in stats.fields.items():
                # Skip unconstrained fields that never failed.
                if not field_stats.constraints and not field_stats.failures:
                    continue
                failures = [
                    {
                        "location": list(failure.location),
                        "message": failure.message,
                        "error_type": failure.error_type,
                        "value": failure.value,
                        "hint": failure.hint,
                    }
                    for failure in field_stats.failures
                ]
                total_failures += len(failures)
                field_entries.append(
                    {
                        "name": field_name,
                        "constraints": field_stats.constraints,
                        "attempts": field_stats.attempts,
                        "successes": field_stats.successes,
                        "failures": failures,
                    }
                )
            if not field_entries:
                continue
            models_summary.append(
                {
                    "model": model_key,
                    "attempts": stats.attempts,
                    "successes": stats.successes,
                    "fields": field_entries,
                }
            )

        total_models = sum(stats.attempts for stats in self._models.values())
        models_with_failures = sum(
            1 for entry in models_summary if any(field["failures"] for field in entry["fields"])
        )

        return {
            "models": models_summary,
            "total_models": total_models,
            "models_with_failures": models_with_failures,
            "total_failures": total_failures,
        }

    def has_failures(self) -> bool:
        """Return True when any field has at least one recorded failure."""
        return any(
            field.failures for stats in self._models.values() for field in stats.fields.values()
        )

    def merge_from(self, other: ConstraintReporter) -> None:
        """Fold the counters and failures from *other* into this reporter."""
        for model_key, other_stats in other._models.items():
            stats = self._models.setdefault(model_key, ModelStats())
            stats.attempts += other_stats.attempts
            stats.successes += other_stats.successes
            for field_name, other_field in other_stats.fields.items():
                field_stats = stats.fields.setdefault(
                    field_name,
                    FieldStats(constraints=other_field.constraints),
                )
                if field_stats.constraints is None and other_field.constraints is not None:
                    field_stats.constraints = other_field.constraints
                field_stats.attempts += other_field.attempts
                field_stats.successes += other_field.successes
                field_stats.failures.extend(other_field.failures)

    def _record_failures(
        self,
        stats: ModelStats,
        ctx: _AttemptContext,
        errors: Iterable[Mapping[str, Any]],
    ) -> None:
        """Attribute each validation error to its top-level field."""
        for error in errors:
            loc_raw = tuple(error.get("loc", ()))
            if not loc_raw:
                # Model-level errors cannot be attributed to a field.
                continue
            top_field = str(loc_raw[0])
            summary = ctx.field_summaries.get(top_field)
            field_stats = self._field_stats(stats, top_field, summary)
            value = _extract_value(ctx.field_values.get(top_field), loc_raw[1:])
            error_type = error.get("type")
            message = error.get("msg", "")
            field_stats.failures.append(
                FieldFailure(
                    location=tuple(str(part) for part in loc_raw),
                    message=message,
                    error_type=error_type,
                    value=value,
                    hint=_hint_for_error(error_type, top_field, summary, message),
                )
            )
|
|
211
|
+
|
|
212
|
+
|
|
213
|
+
def _constraints_snapshot(summary: FieldSummary | None) -> dict[str, Any] | None:
    """Capture the active constraints of *summary* as a plain dict.

    Returns None when there is no summary or no constraints at all, so
    callers can distinguish unconstrained fields from constrained ones.
    """
    if summary is None or not summary.constraints.has_constraints():
        return None
    source = summary.constraints
    # Fixed attribute order keeps snapshot dictionaries deterministic.
    names = (
        "ge",
        "gt",
        "le",
        "lt",
        "multiple_of",
        "min_length",
        "max_length",
        "pattern",
        "max_digits",
        "decimal_places",
    )
    return {name: value for name in names if (value := getattr(source, name)) is not None}
|
|
239
|
+
|
|
240
|
+
|
|
241
|
+
def _extract_value(base: Any, path: tuple[Any, ...]) -> Any:
    """Walk *path* into *base*, returning None when any step is missing.

    Each step is resolved against the current node: attribute access for
    BaseModel instances (and, as a fallback, arbitrary objects), key
    lookup for dicts (raw key first, then its string form), and bounded
    index lookup for lists/tuples.
    """
    node = base
    for step in path:
        if node is None:
            return None
        if isinstance(node, BaseModel):
            node = getattr(node, str(step), None)
        elif isinstance(node, dict):
            node = node.get(step) if step in node else node.get(str(step))
        elif isinstance(node, (list, tuple)) and isinstance(step, int):
            if not 0 <= step < len(node):
                return None
            node = node[step]
        else:
            try:
                node = getattr(node, str(step))
            except AttributeError:
                return None
    return node
|
|
263
|
+
|
|
264
|
+
|
|
265
|
+
def _hint_for_error(
    error_type: str | None,
    field_name: str,
    summary: FieldSummary | None,
    message: str,
) -> str:
    """Return an actionable remediation hint for a validation error.

    NOTE: *summary* and *message* are currently unused; they are kept so
    the signature stays stable for callers.
    """
    fallback = (
        f"Configure an override for '{field_name}' via "
        "`[tool.pydantic_fixturegen.overrides]` or adjust the model constraints."
    )
    if not error_type:
        return fallback

    # Map error-type prefixes to tailored hints; first match wins.
    prefix_hints: tuple[tuple[tuple[str, ...], str], ...] = (
        (
            ("value_error.number",),
            f"Adjust numeric bounds or override the provider for '{field_name}'.",
        ),
        (
            ("value_error.any_str",),
            f"Adjust string length/pattern constraints or override the provider for '{field_name}'.",
        ),
        (
            ("value_error.list", "value_error.collection"),
            f"Adjust collection size constraints or provide a custom generator for '{field_name}'.",
        ),
        (
            ("type_error",),
            f"Ensure the generated value for '{field_name}' matches the expected type or supply an override.",
        ),
    )
    for prefixes, hint in prefix_hints:
        if error_type.startswith(prefixes):
            return hint
    return fallback
|
|
298
|
+
|
|
299
|
+
|
|
300
|
+
# Names re-exported as the public surface of this module.
__all__ = ["ConstraintReporter", "FieldFailure", "FieldStats", "ModelStats"]
|
|
@@ -14,6 +14,8 @@ class ErrorCode(IntEnum):
|
|
|
14
14
|
MAPPING = 20
|
|
15
15
|
EMIT = 30
|
|
16
16
|
UNSAFE_IMPORT = 40
|
|
17
|
+
DIFF = 50
|
|
18
|
+
WATCH = 60
|
|
17
19
|
|
|
18
20
|
|
|
19
21
|
class PFGError(Exception):
|
|
@@ -126,11 +128,51 @@ class UnsafeImportError(PFGError):
|
|
|
126
128
|
)
|
|
127
129
|
|
|
128
130
|
|
|
131
|
+
class DiffError(PFGError):
    """Raised when generated artifacts differ from expected output."""

    def __init__(
        self,
        message: str,
        *,
        details: Mapping[str, Any] | None = None,
        hint: str | None = None,
    ) -> None:
        # Delegate to PFGError with the DIFF error code and a fixed kind
        # label so structured error payloads stay consistent.
        super().__init__(
            message,
            code=ErrorCode.DIFF,
            kind="DiffError",
            details=details,
            hint=hint,
        )
|
|
148
|
+
|
|
149
|
+
|
|
150
|
+
class WatchError(PFGError):
    """Raised when watch mode cannot be started."""

    def __init__(
        self,
        message: str,
        *,
        details: Mapping[str, Any] | None = None,
        hint: str | None = None,
    ) -> None:
        # Delegate to PFGError with the WATCH error code and a fixed kind
        # label so structured error payloads stay consistent.
        super().__init__(
            message,
            code=ErrorCode.WATCH,
            kind="WatchError",
            details=details,
            hint=hint,
        )
|
|
167
|
+
|
|
168
|
+
|
|
129
169
|
# Public error API, kept in alphabetical order. The 1.1.0 additions
# previously broke the ordering ("DiffError" after "EmitError",
# "WatchError" before "UnsafeImportError"); re-sorted here.
__all__ = [
    "DiffError",
    "DiscoveryError",
    "EmitError",
    "ErrorCode",
    "MappingError",
    "PFGError",
    "UnsafeImportError",
    "WatchError",
]
|
|
@@ -0,0 +1,100 @@
|
|
|
1
|
+
"""Field policy definitions and matching utilities."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import fnmatch
|
|
6
|
+
import re
|
|
7
|
+
from collections.abc import Iterable, Mapping, Sequence
|
|
8
|
+
from dataclasses import dataclass, field
|
|
9
|
+
from typing import Any
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class FieldPolicyConflictError(ValueError):
    """Signals that two field policies assign different values to the same
    attribute of one field path."""
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
@dataclass(frozen=True)
class FieldPolicy:
    """A single pattern -> options rule applied to field paths.

    Patterns prefixed with ``re:`` are treated as full-match regular
    expressions; all other patterns use shell-style globbing. The
    ``index`` records declaration order and breaks specificity ties.
    """

    pattern: str
    options: Mapping[str, Any]
    index: int
    _is_regex: bool = field(init=False, repr=False)
    _regex: re.Pattern[str] | None = field(init=False, repr=False)
    _specificity: tuple[int, int, int] = field(init=False, repr=False)

    def __post_init__(self) -> None:
        pattern = self.pattern.strip()
        if not pattern:
            raise ValueError("Field policy pattern must be a non-empty string.")

        # BUGFIX: persist the normalized pattern. Previously the stripped
        # form was used for regex compilation and specificity while the raw
        # (possibly whitespace-padded) pattern was still used for glob
        # matching, so padded glob patterns could never match.
        object.__setattr__(self, "pattern", pattern)

        object.__setattr__(self, "_is_regex", pattern.startswith("re:"))
        if self._is_regex:
            compiled = re.compile(pattern[3:])
            object.__setattr__(self, "_regex", compiled)
            # Regex policies rank after every glob pattern (sentinel 1000);
            # ties break on length, then declaration order.
            object.__setattr__(self, "_specificity", (1000, -len(pattern), -self.index))
        else:
            # Fewer wildcards, more dotted segments, and longer patterns
            # count as more specific (lower tuples sort first).
            wildcard_count = pattern.count("*") + pattern.count("?")
            segments = pattern.count(".") + 1
            specificity = (wildcard_count, -segments, -len(pattern))
            object.__setattr__(self, "_regex", None)
            object.__setattr__(self, "_specificity", specificity)

    @property
    def specificity(self) -> tuple[int, int, int]:
        """Sort key: lower tuples are more specific."""
        return self._specificity

    def matches(self, path: str) -> bool:
        """Return True when *path* matches this policy's pattern."""
        if self._is_regex:
            assert self._regex is not None
            return bool(self._regex.fullmatch(path))
        return fnmatch.fnmatchcase(path, self.pattern)
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
class FieldPolicySet:
    """Collection of field policies with deterministic resolution."""

    def __init__(self, policies: Sequence[FieldPolicy]) -> None:
        # Most specific policies first; declaration order breaks ties.
        self._policies = sorted(policies, key=lambda p: (p.specificity, p.index))

    @staticmethod
    def _candidate_paths(path: str, aliases: Sequence[str] | None) -> tuple[str, ...]:
        """Return *path* plus any non-empty aliases, de-duplicated in order."""
        if not aliases:
            return (path,)
        ordered: dict[str, None] = {}
        for candidate in (path, *aliases):
            if candidate and candidate not in ordered:
                ordered[candidate] = None
        return tuple(ordered)

    def resolve(self, path: str, *, aliases: Sequence[str] | None = None) -> Mapping[str, Any]:
        """Merge the options of every policy matching *path* (or an alias).

        The most specific matching policy wins each attribute. A less
        specific policy that supplies a *different* value for an attribute
        already set raises ``FieldPolicyConflictError``; None-valued
        options are ignored.
        """
        if not self._policies:
            return {}

        candidates = self._candidate_paths(path, aliases)
        merged: dict[str, Any] = {}
        winning_pattern: dict[str, str] = {}
        for policy in self._policies:
            if not any(policy.matches(candidate) for candidate in candidates):
                continue
            for name, option in policy.options.items():
                if option is None:
                    continue
                if name in merged:
                    if merged[name] != option:
                        raise FieldPolicyConflictError(
                            f"Conflicting field policies for '{path}' attribute '{name}': "
                            f"{winning_pattern[name]!r} vs {policy.pattern!r}"
                        )
                    continue
                winning_pattern[name] = policy.pattern
                merged[name] = option

        return merged

    def iterable(self) -> Iterable[FieldPolicy]:
        """Expose the policies in resolution order."""
        return self._policies
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
# Public names exported by the field-policy module.
__all__ = ["FieldPolicy", "FieldPolicyConflictError", "FieldPolicySet"]
|