schemathesis 4.2.0__py3-none-any.whl → 4.2.2__py3-none-any.whl

--- a/schemathesis/cli/commands/run/handlers/cassettes.py
+++ b/schemathesis/cli/commands/run/handlers/cassettes.py
@@ -119,102 +119,84 @@ def get_command_representation() -> str:
 
 
 def vcr_writer(output: TextOutput, config: ProjectConfig, queue: Queue) -> None:
-    """Write YAML to a file in an incremental manner.
-
-    This implementation doesn't use `pyyaml` package and composes YAML manually as string due to the following reasons:
-    - It is much faster. The string-based approach gives only ~2.5% time overhead when `yaml.CDumper` has ~11.2%;
-    - Implementation complexity. We have a quite simple format where almost all values are strings, and it is much
-      simpler to implement it with string composition rather than with adjusting `yaml.Serializer` to emit explicit
-      types. Another point is that with `pyyaml` we need to emit events and handle some low-level details like
-      providing tags, anchors to have incremental writing, with primitive types it is much simpler.
-    """
+    """Write YAML to a file in an incremental manner."""
     current_id = 1
 
-    def format_header_values(values: list[str]) -> str:
-        return "\n".join(f" - {json.dumps(v)}" for v in values)
+    def write_header_values(stream: IO, values: list[str]) -> None:
+        for v in values:
+            stream.write(f" - {json.dumps(v)}\n")
 
     if config.output.sanitization.enabled:
-
-        def format_headers(headers: dict[str, list[str]]) -> str:
-            headers = deepclone(headers)
-            sanitize_value(headers, config=config.output.sanitization)
-            return "\n".join(f' "{name}":\n{format_header_values(values)}' for name, values in headers.items())
-
+        sanitization_keys = config.output.sanitization.keys_to_sanitize
+        sensitive_markers = config.output.sanitization.sensitive_markers
+        replacement = config.output.sanitization.replacement
+
+        def write_headers(stream: IO, headers: dict[str, list[str]]) -> None:
+            for name, values in headers.items():
+                lower_name = name.lower()
+                stream.write(f' "{name}":\n')
+
+                # Sanitize inline if needed
+                if lower_name in sanitization_keys or any(marker in lower_name for marker in sensitive_markers):
+                    stream.write(f" - {json.dumps(replacement)}\n")
+                else:
+                    write_header_values(stream, values)
     else:
 
-        def format_headers(headers: dict[str, list[str]]) -> str:
-            return "\n".join(f' "{name}":\n{format_header_values(values)}' for name, values in headers.items())
-
-    def format_check_message(message: str | None) -> str:
-        return "~" if message is None else f"{message!r}"
+        def write_headers(stream: IO, headers: dict[str, list[str]]) -> None:
+            for name, values in headers.items():
+                stream.write(f' "{name}":\n')
+                write_header_values(stream, values)
 
-    def format_checks(checks: list[CheckNode]) -> str:
+    def write_checks(stream: IO, checks: list[CheckNode]) -> None:
         if not checks:
-            return "\n checks: []"
-        items = "\n".join(
-            f" - name: '{check.name}'\n status: '{check.status.name.upper()}'\n message: {format_check_message(check.failure_info.failure.title if check.failure_info else None)}"
-            for check in checks
-        )
-        return f"""
- checks:
- {items}"""
+            stream.write("\n checks: []")
+            return
+
+        stream.write("\n checks:\n")
+        for check in checks:
+            message = check.failure_info.failure.title if check.failure_info else None
+            message_str = "~" if message is None else repr(message)
+            stream.write(
+                f" - name: '{check.name}'\n"
+                f" status: '{check.status.name.upper()}'\n"
+                f" message: {message_str}\n"
+            )
 
     if config.reports.preserve_bytes:
 
-        def format_request_body(output: IO, request: Request) -> None:
+        def write_request_body(stream: IO, request: Request) -> None:
             if request.encoded_body is not None:
-                output.write(
-                    f"""
- body:
- encoding: 'utf-8'
- base64_string: '{request.encoded_body}'"""
-                )
+                stream.write(f"\n body:\n encoding: 'utf-8'\n base64_string: '{request.encoded_body}'")
 
-        def format_response_body(output: IO, response: Response) -> None:
+        def write_response_body(stream: IO, response: Response) -> None:
             if response.encoded_body is not None:
-                output.write(
-                    f""" body:
- encoding: '{response.encoding}'
- base64_string: '{response.encoded_body}'"""
+                stream.write(
+                    f" body:\n encoding: '{response.encoding}'\n base64_string: '{response.encoded_body}'"
                 )
-
     else:
 
-        def format_request_body(output: IO, request: Request) -> None:
+        def write_request_body(stream: IO, request: Request) -> None:
             if request.body is not None:
                 string = request.body.decode("utf8", "replace")
-                output.write(
-                    """
- body:
- encoding: 'utf-8'
- string: """
-                )
-                write_double_quoted(output, string)
+                stream.write("\n body:\n encoding: 'utf-8'\n string: ")
+                write_double_quoted(stream, string)
 
-        def format_response_body(output: IO, response: Response) -> None:
+        def write_response_body(stream: IO, response: Response) -> None:
             if response.content is not None:
                 encoding = response.encoding or "utf8"
                 string = response.content.decode(encoding, "replace")
-                output.write(
-                    f""" body:
- encoding: '{encoding}'
- string: """
-                )
-                write_double_quoted(output, string)
+                stream.write(f" body:\n encoding: '{encoding}'\n string: ")
+                write_double_quoted(stream, string)
 
     with open_text_output(output) as stream:
         while True:
             item = queue.get()
-            if isinstance(item, Initialize):
-                stream.write(
-                    f"""command: '{get_command_representation()}'
-recorded_with: 'Schemathesis {SCHEMATHESIS_VERSION}'
-seed: {item.seed}
-http_interactions:"""
-                )
-            elif isinstance(item, Process):
+            if isinstance(item, Process):
                 for case_id, interaction in item.recorder.interactions.items():
                     case = item.recorder.cases[case_id]
+
+                    # Determine status and checks
                     if interaction.response is not None:
                         if case_id in item.recorder.checks:
                             checks = item.recorder.checks[case_id]
@@ -224,101 +206,93 @@ http_interactions:"""
                                     status = check.status
                                     break
                         else:
-                            # NOTE: Checks recording could be skipped if Schemathesis start skipping just
-                            # discovered failures in order to get past them and potentially discover more failures
                             checks = []
                             status = Status.SKIP
                     else:
                         checks = []
                         status = Status.ERROR
-                    # Body payloads are handled via separate `stream.write` calls to avoid some allocations
-                    stream.write(
-                        f"""\n- id: '{case_id}'
- status: '{status.name}'"""
-                    )
+
+                    # Write interaction header
+                    stream.write(f"\n- id: '{case_id}'\n status: '{status.name}'")
+
+                    # Write metadata if present
                     meta = case.value.meta
                     if meta is not None:
-                        # Start metadata block
-                        stream.write(f"""
- generation:
- time: {meta.generation.time}
- mode: {meta.generation.mode.value}
- components:""")
-
-                        # Write components
+                        stream.write(
+                            f"\n generation:\n"
+                            f" time: {meta.generation.time}\n"
+                            f" mode: {meta.generation.mode.value}\n"
+                            f" components:"
+                        )
+
                        for kind, info in meta.components.items():
-                            stream.write(f"""
- {kind.value}:
- mode: '{info.mode.value}'""")
-                        # Write phase info
-                        stream.write("\n phase:")
-                        stream.write(f"\n name: '{meta.phase.name.value}'")
-                        stream.write("\n data: ")
-
-                        # Write phase-specific data
+                            stream.write(f"\n {kind.value}:\n mode: '{info.mode.value}'")
+
+                        stream.write(f"\n phase:\n name: '{meta.phase.name.value}'\n data: ")
+
                         if isinstance(meta.phase.data, CoveragePhaseData):
-                            stream.write("""
- description: """)
+                            stream.write("\n description: ")
                             write_double_quoted(stream, meta.phase.data.description)
-                            stream.write("""
- location: """)
+                            stream.write("\n location: ")
                             write_double_quoted(stream, meta.phase.data.location)
-                            stream.write("""
- parameter: """)
+                            stream.write("\n parameter: ")
                             if meta.phase.data.parameter is not None:
                                 write_double_quoted(stream, meta.phase.data.parameter)
                             else:
                                 stream.write("null")
-                            stream.write("""
- parameter_location: """)
+                            stream.write("\n parameter_location: ")
                             if meta.phase.data.parameter_location is not None:
                                 write_double_quoted(stream, meta.phase.data.parameter_location)
                             else:
                                 stream.write("null")
                         else:
-                            # Empty objects for these phases
                             stream.write("{}")
                     else:
-                        stream.write("null")
+                        stream.write("\n metadata: null")
 
+                    # Sanitize URL if needed
                     if config.output.sanitization.enabled:
                         uri = sanitize_url(interaction.request.uri, config=config.output.sanitization)
                     else:
                         uri = interaction.request.uri
+
                     recorded_at = datetime.datetime.fromtimestamp(
                         interaction.timestamp, datetime.timezone.utc
                     ).isoformat()
+
+                    stream.write(f"\n recorded_at: '{recorded_at}'")
+                    write_checks(stream, checks)
                     stream.write(
-                        f"""
- recorded_at: '{recorded_at}'{format_checks(checks)}
- request:
- uri: '{uri}'
- method: '{interaction.request.method}'
- headers:
- {format_headers(interaction.request.headers)}"""
+                        f"\n request:\n uri: '{uri}'\n method: '{interaction.request.method}'\n headers:\n"
                     )
-                    format_request_body(stream, interaction.request)
+                    write_headers(stream, interaction.request.headers)
+                    write_request_body(stream, interaction.request)
+
+                    # Write response
                     if interaction.response is not None:
                         stream.write(
-                            f"""
- response:
- status:
- code: '{interaction.response.status_code}'
- message: {json.dumps(interaction.response.message)}
- elapsed: '{interaction.response.elapsed}'
- headers:
- {format_headers(interaction.response.headers)}
- """
-                        )
-                        format_response_body(stream, interaction.response)
-                        stream.write(
-                            f"""
- http_version: '{interaction.response.http_version}'"""
+                            f"\n response:\n"
+                            f" status:\n"
+                            f" code: '{interaction.response.status_code}'\n"
+                            f" message: {json.dumps(interaction.response.message)}\n"
+                            f" elapsed: '{interaction.response.elapsed}'\n"
+                            f" headers:\n"
                         )
+                        write_headers(stream, interaction.response.headers)
+                        stream.write("\n")
+                        write_response_body(stream, interaction.response)
+                        stream.write(f"\n http_version: '{interaction.response.http_version}'")
                     else:
-                        stream.write("""
- response: null""")
+                        stream.write("\n response: null")
+
                     current_id += 1
+            elif isinstance(item, Initialize):
+                stream.write(
+                    f"command: '{get_command_representation()}'\n"
+                    f"recorded_with: 'Schemathesis {SCHEMATHESIS_VERSION}'\n"
+                    f"seed: {item.seed}\n"
+                    f"http_interactions:"
+                )
             else:
                 break
 
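The rewritten cassette writer streams YAML fragments directly instead of building intermediate strings, and header sanitization now happens inline rather than via `deepclone` + `sanitize_value` on a copy. A minimal runnable sketch of that inline approach (the constant names are illustrative stand-ins for `config.output.sanitization` settings, and the YAML indent widths are illustrative too, since the diff rendering collapsed repeated spaces):

```python
import io
import json

# Illustrative stand-ins for `config.output.sanitization` settings
KEYS_TO_SANITIZE = {"authorization", "cookie"}
SENSITIVE_MARKERS = ("token", "secret")
REPLACEMENT = "[Filtered]"


def write_headers(stream, headers):
    # Compose YAML by hand and replace sensitive values inline,
    # instead of deep-cloning the headers and sanitizing the copy
    for name, values in headers.items():
        lower_name = name.lower()
        stream.write(f'  "{name}":\n')
        if lower_name in KEYS_TO_SANITIZE or any(m in lower_name for m in SENSITIVE_MARKERS):
            stream.write(f"  - {json.dumps(REPLACEMENT)}\n")
        else:
            for v in values:
                stream.write(f"  - {json.dumps(v)}\n")


buffer = io.StringIO()
write_headers(buffer, {"Authorization": ["Bearer abc123"], "Accept": ["application/json"]})
print(buffer.getvalue())
```

Writing each fragment as it is produced avoids the per-interaction string allocations of the old `format_*` helpers while keeping the output byte-identical in shape.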
--- a/schemathesis/core/jsonschema/bundler.py
+++ b/schemathesis/core/jsonschema/bundler.py
@@ -90,7 +90,7 @@ class Bundler:
         # In the future, it **should** be handled by `hypothesis-jsonschema` instead.
         cloned = deepclone(resolved_schema)
         remaining_references = sanitize(cloned)
-        if remaining_references:
+        if reference in remaining_references:
             # This schema is either infinitely recursive or the sanitization logic misses it, in any
             # event, we give up here
             raise InfiniteRecursiveReference(reference)
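This one-line change is behavioral, not cosmetic: 4.2.0 raised `InfiniteRecursiveReference` whenever *any* reference survived sanitization, while 4.2.2 only raises when the reference currently being bundled survives. A sketch with illustrative values (not the real `Bundler` internals):

```python
# Illustrative values only - not the real Bundler internals
remaining_references = {"#/components/schemas/Other"}  # refs left after sanitize()
reference = "#/components/schemas/User"                # the ref being bundled

# 4.2.0: truthiness check - an unrelated leftover ref was enough to raise
assert bool(remaining_references)
# 4.2.2: membership check - only a surviving occurrence of `reference` is fatal
assert reference not in remaining_references
```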
--- a/schemathesis/core/jsonschema/references.py
+++ b/schemathesis/core/jsonschema/references.py
@@ -1,122 +1,222 @@
 from __future__ import annotations
 
-from schemathesis.core.jsonschema.keywords import ALL_KEYWORDS
-from schemathesis.core.jsonschema.types import JsonSchema, JsonSchemaObject, get_type
+from typing import Any
 
+from schemathesis.core.jsonschema.types import JsonSchema, JsonSchemaObject
 
-def sanitize(schema: JsonSchema) -> set[str]:
-    """Remove optional parts of the schema that contain references.
 
-    It covers only the most popular cases, as removing all optional parts is complicated.
-    We might fall back to filtering out invalid cases in the future.
-    """
+def sanitize(schema: JsonSchema) -> set[str]:
+    """Remove $ref from optional locations."""
     if isinstance(schema, bool):
         return set()
 
-    stack = [schema]
+    stack: list[JsonSchema] = [schema]
+
     while stack:
         current = stack.pop()
-        if isinstance(current, dict):
-            # Optional properties
-            if "properties" in current:
-                properties = current["properties"]
-                required = current.get("required", [])
-                for name, value in list(properties.items()):
-                    if isinstance(value, dict):
-                        if name not in required and _has_references(value):
-                            del properties[name]
-                        elif _find_single_reference_combinators(value):
-                            properties.pop(name, None)
-                        else:
-                            stack.append(value)
-
-            # Optional items
-            if "items" in current:
-                _sanitize_items(current)
-            # Not required additional properties
-            if "additionalProperties" in current:
-                _sanitize_additional_properties(current)
-            for k in _find_single_reference_combinators(current):
-                del current[k]
+        if not isinstance(current, dict):
+            continue
+
+        _sanitize_combinators(current)
+
+        _sanitize_properties(current)
+
+        if "items" in current:
+            _sanitize_items(current)
+
+        if "prefixItems" in current:
+            _sanitize_prefix_items(current)
+
+        if "additionalProperties" in current:
+            _sanitize_additional_properties(current)
+
+        if "additionalItems" in current:
+            _sanitize_additional_items(current)
+
+        for value in current.values():
+            if isinstance(value, dict):
+                stack.append(value)
+            elif isinstance(value, list):
+                for item in value:
+                    if isinstance(item, dict):
+                        stack.append(item)
 
     remaining: set[str] = set()
     _collect_all_references(schema, remaining)
     return remaining
 
 
-def _collect_all_references(schema: JsonSchema | list[JsonSchema], remaining: set[str]) -> None:
-    """Recursively collect all $ref present in the schema."""
-    if isinstance(schema, dict):
-        reference = schema.get("$ref")
-        if isinstance(reference, str):
-            remaining.add(reference)
-        for value in schema.values():
-            _collect_all_references(value, remaining)
-    elif isinstance(schema, list):
-        for item in schema:
-            _collect_all_references(item, remaining)
+def _sanitize_combinators(schema: JsonSchemaObject) -> None:
+    """Sanitize anyOf/oneOf/allOf."""
+    for combinator_key in ("anyOf", "oneOf"):
+        variants = schema.get(combinator_key)
+        if not isinstance(variants, list):
+            continue
 
+        flattened = _flatten_combinator(variants, combinator_key)
 
-def _has_references_in_items(items: list[JsonSchema]) -> bool:
-    return any("$ref" in item for item in items if isinstance(item, dict))
+        cleaned = [variant for variant in flattened if not _has_ref(variant)]
 
+        # Only update if we have non-$ref variants
+        if cleaned:
+            # At least one alternative remains, which narrows the constraints
+            schema[combinator_key] = cleaned
+        elif not flattened:
+            schema.pop(combinator_key, None)
 
-def _has_references(schema: JsonSchemaObject) -> bool:
-    if "$ref" in schema:
+    all_of = schema.get("allOf")
+    if isinstance(all_of, list):
+        flattened = _flatten_combinator(all_of, "allOf")
+
+        cleaned = [variant for variant in flattened if not _is_empty(variant)]
+        if cleaned:
+            schema["allOf"] = cleaned
+        else:
+            schema.pop("allOf", None)
+
+
+def _flatten_combinator(variants: list, key: str) -> list:
+    """Flatten nested same-type combinators."""
+    result = []
+    for variant in variants:
+        if isinstance(variant, dict) and key in variant and isinstance(variant[key], list):
+            result.extend(variant[key])
+        else:
+            result.append(variant)
+    return result
+
+
+def _is_empty(schema: JsonSchema) -> bool:
+    """Check if schema accepts anything."""
+    if schema is True:
         return True
-    items = schema.get("items")
-    return (isinstance(items, dict) and "$ref" in items) or isinstance(items, list) and _has_references_in_items(items)
 
+    if not isinstance(schema, dict):
+        return False
 
-def _is_optional_schema(schema: JsonSchema) -> bool:
-    # Whether this schema could be dropped from a list of schemas
-    if isinstance(schema, bool):
+    if not schema:
         return True
-    type_ = get_type(schema)
-    if type_ == ["object"]:
-        # Empty object is valid for this schema -> could be dropped
-        return schema.get("required", []) == [] and schema.get("minProperties", 0) == 0
-    # Has at least one keyword -> should not be removed
-    return not any(k in ALL_KEYWORDS for k in schema)
-
-
-def _find_single_reference_combinators(schema: JsonSchemaObject) -> list[str]:
-    # Schema example:
-    # {
-    #     "type": "object",
-    #     "properties": {
-    #         "parent": {
-    #             "allOf": [{"$ref": "#/components/schemas/User"}]
-    #         }
-    #     }
-    # }
-    found = []
-    for keyword in ("allOf", "oneOf", "anyOf"):
-        combinator = schema.get(keyword)
-        if combinator is not None:
-            optionals = [subschema for subschema in combinator if not _is_optional_schema(subschema)]
-            # NOTE: The first schema is not bool, hence it is safe to pass it to `_has_references`
-            if len(optionals) == 1 and _has_references(optionals[0]):
-                found.append(keyword)
-    return found
+
+    # Only non-validating keywords
+    NON_VALIDATING = {
+        "$id",
+        "$schema",
+        "$defs",
+        "definitions",
+        "title",
+        "description",
+        "default",
+        "examples",
+        "example",
+        "$comment",
+        "deprecated",
+        "readOnly",
+        "writeOnly",
+    }
+
+    return all(key in NON_VALIDATING for key in schema.keys())
+
+
+def _sanitize_properties(schema: JsonSchemaObject) -> None:
+    """Remove OPTIONAL property schemas if they have $ref."""
+    if "properties" not in schema:
+        return
+
+    properties = schema["properties"]
+    if not isinstance(properties, dict):
+        return
+
+    required = schema.get("required", [])
+
+    for name, subschema in list(properties.items()):
+        if not _has_ref(subschema):
+            continue
+
+        if name not in required:
+            del properties[name]
 
 
 def _sanitize_items(schema: JsonSchemaObject) -> None:
+    """Convert to empty array ONLY if minItems allows it."""
     items = schema["items"]
+
+    has_ref = False
+    if isinstance(items, dict):
+        has_ref = _has_ref(items)
+    elif isinstance(items, list):
+        has_ref = any(_has_ref(item) for item in items)
+
+    if not has_ref:
+        return
+
     min_items = schema.get("minItems", 0)
-    if not min_items:
-        if isinstance(items, dict) and ("$ref" in items or _find_single_reference_combinators(items)):
-            _convert_to_empty_array(schema)
-        if isinstance(items, list) and _has_references_in_items(items):
-            _convert_to_empty_array(schema)
+
+    if min_items == 0:
+        _convert_to_empty_array(schema)
+
+
+def _sanitize_prefix_items(schema: JsonSchemaObject) -> None:
+    """Same logic as items."""
+    prefix_items = schema["prefixItems"]
+
+    if not isinstance(prefix_items, list):
+        return
+
+    if not any(_has_ref(item) for item in prefix_items):
+        return
+
+    min_items = schema.get("minItems", 0)
+
+    if min_items == 0:
+        _convert_to_empty_array(schema)
 
 
 def _convert_to_empty_array(schema: JsonSchemaObject) -> None:
-    del schema["items"]
+    schema.pop("items", None)
+    schema.pop("prefixItems", None)
     schema["maxItems"] = 0
+    schema["minItems"] = 0
 
 
 def _sanitize_additional_properties(schema: JsonSchemaObject) -> None:
-    additional_properties = schema["additionalProperties"]
-    if isinstance(additional_properties, dict) and "$ref" in additional_properties:
+    additional = schema["additionalProperties"]
+    if _has_ref(additional):
         schema["additionalProperties"] = False
+
+
+def _sanitize_additional_items(schema: JsonSchemaObject) -> None:
+    additional = schema["additionalItems"]
+    if _has_ref(additional):
+        schema["additionalItems"] = False
+
+
+def _has_ref(schema: Any) -> bool:
+    """Check if schema contains $ref at any level."""
+    if not isinstance(schema, dict):
+        return False
+
+    if "$ref" in schema:
+        return True
+    for value in schema.values():
+        if isinstance(value, dict):
+            if _has_ref(value):
+                return True
+        elif isinstance(value, list):
+            for item in value:
+                if isinstance(item, dict) and _has_ref(item):
+                    return True
+
+    return False
+
+
+def _collect_all_references(schema: JsonSchema | list[JsonSchema], remaining: set[str]) -> None:
+    """Collect all remaining $ref."""
+    if isinstance(schema, dict):
+        ref = schema.get("$ref")
+        if isinstance(ref, str):
+            remaining.add(ref)
+        for value in schema.values():
+            _collect_all_references(value, remaining)
+    elif isinstance(schema, list):
+        for item in schema:
+            _collect_all_references(item, remaining)
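The rewritten `sanitize` drops `$ref` occurrences from optional locations (optional properties, zero-`minItems` arrays, `additionalProperties`/`additionalItems`, prunable combinator branches) and reports whatever survives back to the caller. A sketch of the intended behavior, assuming the module is importable as `schemathesis.core.jsonschema.references`:

```python
from schemathesis.core.jsonschema import references

schema = {
    "type": "object",
    "required": ["owner"],
    "properties": {
        "owner": {"$ref": "#/components/schemas/User"},   # required -> must stay
        "parent": {"$ref": "#/components/schemas/Node"},  # optional -> removable
    },
}

remaining = references.sanitize(schema)
assert "parent" not in schema["properties"]        # optional $ref location dropped
assert remaining == {"#/components/schemas/User"}  # surviving $ref reported back
```

A non-empty result tells the bundler which references it could not eliminate, which is exactly what the membership check in `bundler.py` above now consumes.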
--- a/schemathesis/core/jsonschema/types.py
+++ b/schemathesis/core/jsonschema/types.py
@@ -17,7 +17,7 @@ def get_type(schema: JsonSchema, *, _check_type: bool = False) -> list[str]:
         return [ty]
     if ty is ANY_TYPE:
         return list(ty)
-    return list(ty)
+    return [t for t in ALL_TYPES if t in ty]
 
 
 def _get_type(schema: JsonSchema) -> list[str]:
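The `types.py` change makes multi-type results deterministic: instead of echoing the schema author's order, the list is filtered through the module's canonical ordering. A sketch (the exact contents and order of `ALL_TYPES` are an assumption here, not copied from the source):

```python
# Assumed canonical ordering; the real constant lives alongside ANY_TYPE in types.py
ALL_TYPES = ["null", "boolean", "integer", "number", "string", "array", "object"]

ty = ("string", "null")
# 4.2.0: list(ty) preserved whatever order the schema author used
assert list(ty) == ["string", "null"]
# 4.2.2: the result follows ALL_TYPES order, so equal type sets compare equal
assert [t for t in ALL_TYPES if t in ty] == ["null", "string"]
```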
--- a/schemathesis/generation/hypothesis/__init__.py
+++ b/schemathesis/generation/hypothesis/__init__.py
@@ -1,4 +1,8 @@
-from typing import Any
+from __future__ import annotations
+
+import json
+from functools import lru_cache
+from typing import Any, Literal
 
 
 def setup() -> None:
@@ -9,6 +13,8 @@ def setup() -> None:
    from hypothesis.strategies._internal import collections, core
    from hypothesis.vendor import pretty
    from hypothesis_jsonschema import _canonicalise, _from_schema, _resolve
+    from hypothesis_jsonschema._canonicalise import SCHEMA_KEYS, SCHEMA_OBJECT_KEYS, merged
+    from hypothesis_jsonschema._resolve import LocalResolver
 
    from schemathesis.core import INTERNAL_BUFFER_SIZE
    from schemathesis.core.jsonschema.types import _get_type
@@ -35,11 +41,79 @@ def setup() -> None:
        # depending on the schema size (~300 seconds -> 4.5 seconds in one of the benchmarks)
        return None
 
+    class CacheableSchema:
+        """Cache schema by its JSON representation.
+
+        Canonicalisation is not required as schemas with the same JSON representation
+        will have the same validator.
+        """
+
+        __slots__ = ("schema", "encoded")
+
+        def __init__(self, schema: dict[str, Any]) -> None:
+            self.schema = schema
+            self.encoded = hash(json.dumps(schema, sort_keys=True))
+
+        def __eq__(self, other: "CacheableSchema") -> bool:  # type: ignore
+            return self.encoded == other.encoded
+
+        def __hash__(self) -> int:
+            return self.encoded
+
+    SCHEMA_KEYS = frozenset(SCHEMA_KEYS)
+    SCHEMA_OBJECT_KEYS = frozenset(SCHEMA_OBJECT_KEYS)
+
+    @lru_cache()
+    def get_resolver(cache_key: CacheableSchema) -> LocalResolver:
+        """LRU resolver cache."""
+        return LocalResolver.from_schema(cache_key.schema)
+
+    def resolve_all_refs(
+        schema: Literal[True, False] | dict[str, Any],
+        *,
+        resolver: LocalResolver | None = None,
+    ) -> dict[str, Any]:
+        if schema is True:
+            return {}
+        if schema is False:
+            return {"not": {}}
+        if not schema:
+            return schema
+        if resolver is None:
+            resolver = get_resolver(CacheableSchema(schema))
+
+        _resolve_all_refs = resolve_all_refs
+
+        if "$ref" in schema:
+            s = dict(schema)
+            ref = s.pop("$ref")
+            url, resolved = resolver.resolve(ref)
+            resolver.push_scope(url)
+            try:
+                return merged([s, _resolve_all_refs(deepclone(resolved), resolver=resolver)])  # type: ignore
+            finally:
+                resolver.pop_scope()
+
+        for key, value in schema.items():
+            if key in SCHEMA_KEYS:
+                if isinstance(value, list):
+                    schema[key] = [_resolve_all_refs(v, resolver=resolver) if isinstance(v, dict) else v for v in value]
+                elif isinstance(value, dict):
+                    schema[key] = _resolve_all_refs(value, resolver=resolver)
+            if key in SCHEMA_OBJECT_KEYS:
+                schema[key] = {
+                    k: _resolve_all_refs(v, resolver=resolver) if isinstance(v, dict) else v for k, v in value.items()
+                }
+        return schema
+
    root_core.RepresentationPrinter = RepresentationPrinter  # type: ignore
    _resolve.deepcopy = deepclone  # type: ignore
+    _resolve.resolve_all_refs = resolve_all_refs  # type: ignore
    _from_schema.deepcopy = deepclone  # type: ignore
    _from_schema.get_type = _get_type  # type: ignore
+    _from_schema.resolve_all_refs = resolve_all_refs  # type: ignore
    _canonicalise.get_type = _get_type  # type: ignore
+    _canonicalise.CacheableSchema = CacheableSchema  # type: ignore
    root_core.BUFFER_SIZE = INTERNAL_BUFFER_SIZE  # type: ignore
    engine.BUFFER_SIZE = INTERNAL_BUFFER_SIZE
    collections.BUFFER_SIZE = INTERNAL_BUFFER_SIZE  # type: ignore
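`CacheableSchema` exists because plain dicts are unhashable and cannot key an `lru_cache`; hashing the sorted JSON dump lets structurally identical schemas share one `LocalResolver`. A self-contained sketch of the same pattern (the cached builder is a stand-in for the `LocalResolver.from_schema` call, not the real API):

```python
import json
from functools import lru_cache


class CacheableSchema:
    """Wrap an unhashable dict so it can serve as an lru_cache key."""

    __slots__ = ("schema", "encoded")

    def __init__(self, schema: dict) -> None:
        self.schema = schema
        # Schemas with the same sorted JSON dump get the same hash
        self.encoded = hash(json.dumps(schema, sort_keys=True))

    def __eq__(self, other) -> bool:
        return self.encoded == other.encoded

    def __hash__(self) -> int:
        return self.encoded


@lru_cache()
def build_resolver(key: CacheableSchema) -> str:
    # Stand-in for the expensive `LocalResolver.from_schema(key.schema)` call
    return f"resolver({sorted(key.schema)})"


a = build_resolver(CacheableSchema({"type": "object", "properties": {}}))
b = build_resolver(CacheableSchema({"properties": {}, "type": "object"}))
assert a is b  # key order differs, JSON dump matches -> cache hit
```

Note the deliberate trade-off: `__eq__` compares precomputed hashes rather than payloads, so a hash collision would alias two schemas in exchange for cheap lookups.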
--- a/schemathesis/specs/openapi/examples.py
+++ b/schemathesis/specs/openapi/examples.py
@@ -109,15 +109,19 @@ def extract_top_level(
     assert isinstance(operation.schema, BaseOpenAPISchema)
 
     responses = list(operation.responses.iter_examples())
-    seen_references: set[str] = set()
     for parameter in operation.iter_parameters():
         if "schema" in parameter.definition:
             schema = parameter.definition["schema"]
             resolver = RefResolver.from_schema(schema)
-            seen_references.clear()
+            reference_path: tuple[str, ...] = ()
             definitions = [
                 parameter.definition,
-                *_expand_subschemas(schema=schema, resolver=resolver, seen_references=seen_references),
+                *[
+                    expanded_schema
+                    for expanded_schema, _ in _expand_subschemas(
+                        schema=schema, resolver=resolver, reference_path=reference_path
+                    )
+                ],
             ]
         else:
             definitions = [parameter.definition]
@@ -138,10 +142,15 @@ extract_top_level
         if "schema" in parameter.definition:
             schema = parameter.definition["schema"]
             resolver = RefResolver.from_schema(schema)
-            seen_references.clear()
-            for expanded in _expand_subschemas(schema=schema, resolver=resolver, seen_references=seen_references):
-                if isinstance(expanded, dict) and parameter.adapter.examples_container_keyword in expanded:
-                    for value in expanded[parameter.adapter.examples_container_keyword]:
+            reference_path = ()
+            for expanded_schema, _ in _expand_subschemas(
+                schema=schema, resolver=resolver, reference_path=reference_path
+            ):
+                if (
+                    isinstance(expanded_schema, dict)
+                    and parameter.adapter.examples_container_keyword in expanded_schema
+                ):
+                    for value in expanded_schema[parameter.adapter.examples_container_keyword]:
                         yield ParameterExample(
                             container=parameter.location.container_name, name=parameter.name, value=value
                         )
@@ -152,10 +161,15 @@ extract_top_level
         if "schema" in body.definition:
             schema = body.definition["schema"]
             resolver = RefResolver.from_schema(schema)
-            seen_references.clear()
+            reference_path = ()
             definitions = [
                 body.definition,
-                *_expand_subschemas(schema=schema, resolver=resolver, seen_references=seen_references),
+                *[
+                    expanded_schema
+                    for expanded_schema, _ in _expand_subschemas(
+                        schema=schema, resolver=resolver, reference_path=reference_path
+                    )
+                ],
             ]
         else:
             definitions = [body.definition]
@@ -172,58 +186,76 @@ extract_top_level
         if "schema" in body.definition:
             schema = body.definition["schema"]
             resolver = RefResolver.from_schema(schema)
-            seen_references.clear()
-            for expanded in _expand_subschemas(schema=schema, resolver=resolver, seen_references=seen_references):
-                if isinstance(expanded, dict) and body.adapter.examples_container_keyword in expanded:
-                    for value in expanded[body.adapter.examples_container_keyword]:
+            reference_path = ()
+            for expanded_schema, _ in _expand_subschemas(
+                schema=schema, resolver=resolver, reference_path=reference_path
+            ):
+                if isinstance(expanded_schema, dict) and body.adapter.examples_container_keyword in expanded_schema:
+                    for value in expanded_schema[body.adapter.examples_container_keyword]:
                         yield BodyExample(value=value, media_type=body.media_type)
 
 
 @overload
 def _resolve_bundled(
-    schema: dict[str, Any], resolver: RefResolver, seen_references: set[str]
-) -> dict[str, Any]: ...  # pragma: no cover
+    schema: dict[str, Any], resolver: RefResolver, reference_path: tuple[str, ...]
+) -> tuple[dict[str, Any], tuple[str, ...]]: ...
 
 
 @overload
-def _resolve_bundled(schema: bool, resolver: RefResolver, seen_references: set[str]) -> bool: ...  # pragma: no cover
+def _resolve_bundled(
+    schema: bool, resolver: RefResolver, reference_path: tuple[str, ...]
+) -> tuple[bool, tuple[str, ...]]: ...
 
 
 def _resolve_bundled(
-    schema: dict[str, Any] | bool, resolver: RefResolver, seen_references: set[str]
-) -> dict[str, Any] | bool:
+    schema: dict[str, Any] | bool, resolver: RefResolver, reference_path: tuple[str, ...]
+) -> tuple[dict[str, Any] | bool, tuple[str, ...]]:
+    """Resolve $ref if present."""
     if isinstance(schema, dict):
         reference = schema.get("$ref")
         if isinstance(reference, str):
-            if reference in seen_references:
+            # Check if this reference is already in the current path
+            if reference in reference_path:
                 # Try to remove recursive references to avoid infinite recursion
                 remaining_references = references.sanitize(schema)
                 if reference in remaining_references:
                     raise InfiniteRecursiveReference(reference)
-            seen_references.add(reference)
+
+            new_path = reference_path + (reference,)
+
             try:
-                _, schema = resolver.resolve(schema["$ref"])
+                _, resolved_schema = resolver.resolve(reference)
             except RefResolutionError as exc:
                 raise UnresolvableReference(reference) from exc
-    return schema
+
+            return resolved_schema, new_path
+
+    return schema, reference_path
 
 
 def _expand_subschemas(
-    *, schema: dict[str, Any] | bool, resolver: RefResolver, seen_references: set[str]
-) -> Generator[dict[str, Any] | bool, None, None]:
-    schema = _resolve_bundled(schema, resolver, seen_references)
-    yield schema
+    *, schema: dict[str, Any] | bool, resolver: RefResolver, reference_path: tuple[str, ...]
+) -> Generator[tuple[dict[str, Any] | bool, tuple[str, ...]], None, None]:
+    """Expand schema and all its subschemas."""
+    schema, current_path = _resolve_bundled(schema, resolver, reference_path)
+    yield (schema, current_path)
+
     if isinstance(schema, dict):
+        # For anyOf/oneOf, yield each alternative with the same path
         for key in ("anyOf", "oneOf"):
             if key in schema:
                 for subschema in schema[key]:
-                    yield subschema
+                    # Each alternative starts with the current path
+                    yield (subschema, current_path)
+
+        # For allOf, merge all alternatives
         if "allOf" in schema:
             subschema = deepclone(schema["allOf"][0])
-            subschema = _resolve_bundled(subschema, resolver, seen_references)
+            subschema, _ = _resolve_bundled(subschema, resolver, current_path)
+
             for sub in schema["allOf"][1:]:
                 if isinstance(sub, dict):
-                    sub = _resolve_bundled(sub, resolver, seen_references)
+                    sub, _ = _resolve_bundled(sub, resolver, current_path)
                 for key, value in sub.items():
                     if key == "properties":
                         subschema.setdefault("properties", {}).update(value)
@@ -235,7 +267,8 @@ def _expand_subschemas(
                         subschema.setdefault("examples", []).append(value)
                     else:
                         subschema[key] = value
-            yield subschema
+
+            yield (subschema, current_path)
 
 
 def extract_inner_examples(examples: dict[str, Any] | list, schema: BaseOpenAPISchema) -> Generator[Any, None, None]:
@@ -269,13 +302,12 @@ def extract_from_schemas(
     operation: APIOperation[OpenApiParameter, OpenApiResponses, OpenApiSecurityParameters],
 ) -> Generator[Example, None, None]:
     """Extract examples from parameters' schema definitions."""
-    seen_references: set[str] = set()
     for parameter in operation.iter_parameters():
         schema = parameter.optimized_schema
         if isinstance(schema, bool):
             continue
         resolver = RefResolver.from_schema(schema)
-        seen_references.clear()
+        reference_path: tuple[str, ...] = ()
         bundle_storage = schema.get(BUNDLE_STORAGE_KEY)
         for value in extract_from_schema(
             operation=operation,
@@ -283,7 +315,7 @@ def extract_from_schemas(
             example_keyword=parameter.adapter.example_keyword,
             examples_container_keyword=parameter.adapter.examples_container_keyword,
             resolver=resolver,
-            seen_references=seen_references,
+            reference_path=reference_path,
             bundle_storage=bundle_storage,
         ):
             yield ParameterExample(container=parameter.location.container_name, name=parameter.name, value=value)
@@ -295,14 +327,14 @@ def extract_from_schemas(
         resolver = RefResolver.from_schema(schema)
         bundle_storage = schema.get(BUNDLE_STORAGE_KEY)
         for example_keyword, examples_container_keyword in (("example", "examples"), ("x-example", "x-examples")):
-            seen_references.clear()
+            reference_path = ()
             for value in extract_from_schema(
                 operation=operation,
                 schema=schema,
                 example_keyword=example_keyword,
                 examples_container_keyword=examples_container_keyword,
                 resolver=resolver,
-                seen_references=seen_references,
+                reference_path=reference_path,
                 bundle_storage=bundle_storage,
             ):
                 yield BodyExample(value=value, media_type=body.media_type)
@@ -315,49 +347,57 @@ def extract_from_schema(
     example_keyword: str,
     examples_container_keyword: str,
     resolver: RefResolver,
-    seen_references: set[str],
+    reference_path: tuple[str, ...],
     bundle_storage: dict[str, Any] | None,
 ) -> Generator[Any, None, None]:
     """Extract all examples from a single schema definition."""
     # This implementation supports only `properties` and `items`
-    schema = _resolve_bundled(schema, resolver, seen_references)
+    schema, current_path = _resolve_bundled(schema, resolver, reference_path)
+
     if "properties" in schema:
         variants = {}
         required = schema.get("required", [])
         to_generate: dict[str, Any] = {}
+
         for name, subschema in schema["properties"].items():
             values = []
-            for subsubschema in _expand_subschemas(
-                schema=subschema, resolver=resolver, seen_references=seen_references
+            for expanded_schema, expanded_path in _expand_subschemas(
+                schema=subschema, resolver=resolver, reference_path=current_path
             ):
-                if isinstance(subsubschema, bool):
-                    to_generate[name] = subsubschema
+                if isinstance(expanded_schema, bool):
+                    to_generate[name] = expanded_schema
                     continue
-                if example_keyword in subsubschema:
-                    values.append(subsubschema[example_keyword])
-                if examples_container_keyword in subsubschema and isinstance(
-                    subsubschema[examples_container_keyword], list
+
+                if example_keyword in expanded_schema:
+                    values.append(expanded_schema[example_keyword])
+
+                if examples_container_keyword in expanded_schema and isinstance(
+                    expanded_schema[examples_container_keyword], list
                 ):
                     # These are JSON Schema examples, which is an array of values
-                    values.extend(subsubschema[examples_container_keyword])
+                    values.extend(expanded_schema[examples_container_keyword])
+
                 # Check nested examples as well
                 values.extend(
                     extract_from_schema(
                         operation=operation,
-                        schema=subsubschema,
+                        schema=expanded_schema,
                         example_keyword=example_keyword,
                         examples_container_keyword=examples_container_keyword,
                         resolver=resolver,
-                        seen_references=seen_references,
+                        reference_path=expanded_path,
                         bundle_storage=bundle_storage,
                     )
                 )
+
             if not values:
                 if name in required:
                     # Defer generation to only generate these variants if at least one property has examples
-                    to_generate[name] = subsubschema
+                    to_generate[name] = expanded_schema
                 continue
+
            variants[name] = values
+
        if variants:
            config = operation.schema.config.generation_for(operation=operation, phase="examples")
            for name, subschema in to_generate.items():
@@ -369,6 +409,7 @@ def extract_from_schema(
                subschema[BUNDLE_STORAGE_KEY] = bundle_storage
            generated = _generate_single_example(subschema, config)
            variants[name] = [generated]
+
            # Calculate the maximum number of examples any property has
            total_combos = max(len(examples) for examples in variants.values())
            # Evenly distribute examples by cycling through them
@@ -377,6 +418,7 @@ def extract_from_schema(
                name: next(islice(cycle(property_variants), idx, None))
                for name, property_variants in variants.items()
            }
+
    elif "items" in schema and isinstance(schema["items"], dict):
        # Each inner value should be wrapped in an array
        for value in extract_from_schema(
@@ -385,7 +427,7 @@ def extract_from_schema(
            example_keyword=example_keyword,
            examples_container_keyword=examples_container_keyword,
            resolver=resolver,
-            seen_references=seen_references,
+            reference_path=current_path,
            bundle_storage=bundle_storage,
        ):
            yield [value]
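Replacing the shared, mutated `seen_references` set with an immutable `reference_path` tuple changes recursion detection from "seen anywhere during this walk" to "seen on the current branch". A sketch of why that matters when sibling subschemas reuse the same `$ref` (illustrative, not the real traversal code):

```python
# Old approach: one set mutated across the whole traversal
seen: set[str] = set()
flagged = []
for branch in ("a", "b"):  # two sibling properties, both pointing at User
    ref = "#/components/schemas/User"
    if ref in seen:
        flagged.append(branch)
    seen.add(ref)
assert flagged == ["b"]  # the second sibling looked recursive, falsely

# New approach: each branch extends its own tuple, so siblings stay independent
path: tuple[str, ...] = ()
for branch in ("a", "b"):
    branch_path = path + ("#/components/schemas/User",)
    assert "#/components/schemas/User" not in path  # only true cycles re-enter the path
```

Since tuples are extended by copy rather than mutated, `_expand_subschemas` can hand each alternative its own path, and genuine cycles are still caught when a reference re-appears within its own resolution chain.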
--- a/schemathesis-4.2.0.dist-info/METADATA
+++ b/schemathesis-4.2.2.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: schemathesis
-Version: 4.2.0
+Version: 4.2.2
 Summary: Property-based testing framework for Open API and GraphQL based apps
 Project-URL: Documentation, https://schemathesis.readthedocs.io/en/stable/
 Project-URL: Changelog, https://github.com/schemathesis/schemathesis/blob/master/CHANGELOG.md
--- a/schemathesis-4.2.0.dist-info/RECORD
+++ b/schemathesis-4.2.2.dist-info/RECORD
@@ -21,7 +21,7 @@ schemathesis/cli/commands/run/loaders.py,sha256=6j0ez7wduAUYbUT28ELKxMf-dYEWr_67
 schemathesis/cli/commands/run/validation.py,sha256=DQaMiBLN2tYT9hONvv8xnyPvNXZH768UlOdUxTd5kZs,9193
 schemathesis/cli/commands/run/handlers/__init__.py,sha256=TPZ3KdGi8m0fjlN0GjA31MAXXn1qI7uU4FtiDwroXZI,1915
 schemathesis/cli/commands/run/handlers/base.py,sha256=qUtDvtr3F6were_BznfnaPpMibGJMnQ5CA9aEzcIUBc,1306
-schemathesis/cli/commands/run/handlers/cassettes.py,sha256=Px1-xBw5t6tg8rzYNM-VBTpe6qvbUu_RsrYBG_RWGt8,19501
+schemathesis/cli/commands/run/handlers/cassettes.py,sha256=LzvQp--Ub5MXF7etet7fQD0Ufloh1R0j2X1o9dT8Z4k,19253
 schemathesis/cli/commands/run/handlers/junitxml.py,sha256=qiFvM4-SlM67sep003SkLqPslzaEb4nOm3bkzw-DO-Q,2602
 schemathesis/cli/commands/run/handlers/output.py,sha256=TwK82zNpIZ7Q76ggTp8gcW2clzrw0WBmHFJMcvYL1nE,63927
 schemathesis/cli/ext/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -69,10 +69,10 @@ schemathesis/core/transport.py,sha256=LQcamAkFqJ0HuXQzepevAq2MCJW-uq5Nm-HE9yc7HM
 schemathesis/core/validation.py,sha256=b0USkKzkWvdz3jOW1JXYc_TfYshfKZeP7xAUnMqcNoc,2303
 schemathesis/core/version.py,sha256=dOBUWrY3-uA2NQXJp9z7EtZgkR6jYeLg8sMhQCL1mcI,205
 schemathesis/core/jsonschema/__init__.py,sha256=gBZGsXIpK2EFfcp8x0b69dqzWAm2OeZHepKImkkLvoE,320
-schemathesis/core/jsonschema/bundler.py,sha256=ECNAHrXl5nh52crm5Qu1QvRWVV2Vv9gU8H722oKA7k0,7711
+schemathesis/core/jsonschema/bundler.py,sha256=IWZqKb0PqYSKtFW5ncItqHg5o1uVM0yS7vgbBg6pD0c,7724
 schemathesis/core/jsonschema/keywords.py,sha256=pjseXTfH9OItNs_Qq6ubkhNWQOrxTnwHmrP_jxrHeJU,631
-schemathesis/core/jsonschema/references.py,sha256=uB7-DGYhLFqbIAvuO-IDc9xSatoGw54FOYn4xE4qE6A,4667
-schemathesis/core/jsonschema/types.py,sha256=_B1Q6pFzLqjuLU6mR1JiHsRbuz_pnSmN5HNnbeZJtT0,1223
+schemathesis/core/jsonschema/references.py,sha256=c2Q4IKWUbwENNtkbFaqf8r3LLZu6GFE5YLnYQlg5tPg,6069
+schemathesis/core/jsonschema/types.py,sha256=C7f9g8yKFuoxC5_0YNIh8QAyGU0-tj8pzTMfMDjjjVM,1248
 schemathesis/core/output/__init__.py,sha256=SiHqONFskXl73AtP5dV29L14nZoKo7B-IeG52KZB32M,1446
 schemathesis/core/output/sanitization.py,sha256=Ev3tae8dVwsYd7yVb2_1VBFYs92WFsQ4Eu1fGaymItE,2013
 schemathesis/engine/__init__.py,sha256=QaFE-FinaTAaarteADo2RRMJ-Sz6hZB9TzD5KjMinIA,706
@@ -98,7 +98,7 @@ schemathesis/generation/meta.py,sha256=tXhUZBEdpQMn68uMx1SW8Vv59Uf6Wl6yzs-VB9lu_
 schemathesis/generation/metrics.py,sha256=cZU5HdeAMcLFEDnTbNE56NuNq4P0N4ew-g1NEz5-kt4,2836
 schemathesis/generation/modes.py,sha256=Q1fhjWr3zxabU5qdtLvKfpMFZJAwlW9pnxgenjeXTyU,481
 schemathesis/generation/overrides.py,sha256=xI2djHsa42fzP32xpxgxO52INixKagf5DjDAWJYswM8,3890
-schemathesis/generation/hypothesis/__init__.py,sha256=j_lKp7loYRe63TCzIx5yozyt0Ub1ilSsXEP-2zfJ0Ok,2144
+schemathesis/generation/hypothesis/__init__.py,sha256=Dfdz6_Wa7ez6GxrMBxnWQnLaNjPUweTxfi01jiFqso4,4900
 schemathesis/generation/hypothesis/builder.py,sha256=ZdY68aDGeZLLtIld288KF_O6ardFKZdFMBogwltTx2o,38362
 schemathesis/generation/hypothesis/examples.py,sha256=6eGaKUEC3elmKsaqfKj1sLvM8EHc-PWT4NRBq4NI0Rs,1409
 schemathesis/generation/hypothesis/given.py,sha256=sTZR1of6XaHAPWtHx2_WLlZ50M8D5Rjux0GmWkWjDq4,2337
@@ -133,7 +133,7 @@ schemathesis/specs/openapi/_hypothesis.py,sha256=g5476s_ArzheWKHHlOfKwx46tqoiehP
 schemathesis/specs/openapi/checks.py,sha256=YYV6j6idyw2ubY4sLp-avs2OVEkAWeIihjT0xiV1RRA,30669
 schemathesis/specs/openapi/converter.py,sha256=4a6-8STT5snF7B-t6IsOIGdK5rV16oNqsdvWL7VFf2M,6472
 schemathesis/specs/openapi/definitions.py,sha256=8htclglV3fW6JPBqs59lgM4LnA25Mm9IptXBPb_qUT0,93949
-schemathesis/specs/openapi/examples.py,sha256=UquzOwy5QhmpHjFXv_QnZGpWZTU-N4CkQ1PQOxACIb8,22981
+schemathesis/specs/openapi/examples.py,sha256=xddFhKUQFmh5wdHrSWPBAcaby6C3Jtvuo_hsVbRGN1w,24006
 schemathesis/specs/openapi/formats.py,sha256=4tYRdckauHxkJCmOhmdwDq_eOpHPaKloi89lzMPbPzw,3975
 schemathesis/specs/openapi/media_types.py,sha256=F5M6TKl0s6Z5X8mZpPsWDEdPBvxclKRcUOc41eEwKbo,2472
 schemathesis/specs/openapi/patterns.py,sha256=GqPZEXMRdWENQxanWjBOalIZ2MQUjuxk21kmdiI703E,18027
@@ -172,8 +172,8 @@ schemathesis/transport/prepare.py,sha256=erYXRaxpQokIDzaIuvt_csHcw72iHfCyNq8VNEz
 schemathesis/transport/requests.py,sha256=wriRI9fprTplE_qEZLEz1TerX6GwkE3pwr6ZnU2o6vQ,10648
 schemathesis/transport/serialization.py,sha256=GwO6OAVTmL1JyKw7HiZ256tjV4CbrRbhQN0ep1uaZwI,11157
 schemathesis/transport/wsgi.py,sha256=kQtasFre6pjdJWRKwLA_Qb-RyQHCFNpaey9ubzlFWKI,5907
-schemathesis-4.2.0.dist-info/METADATA,sha256=kkIlvuG4NKfAwk6kzHRslKYeYaaFhfSYciy9777c4L0,8540
-schemathesis-4.2.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-schemathesis-4.2.0.dist-info/entry_points.txt,sha256=hiK3un-xfgPdwj9uj16YVDtTNpO128bmk0U82SMv8ZQ,152
-schemathesis-4.2.0.dist-info/licenses/LICENSE,sha256=2Ve4J8v5jMQAWrT7r1nf3bI8Vflk3rZVQefiF2zpxwg,1121
-schemathesis-4.2.0.dist-info/RECORD,,
+schemathesis-4.2.2.dist-info/METADATA,sha256=x3D71YX9YO81-h_n1Emw98R4jxq6NoL33XbYe3ntWNc,8540
+schemathesis-4.2.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+schemathesis-4.2.2.dist-info/entry_points.txt,sha256=hiK3un-xfgPdwj9uj16YVDtTNpO128bmk0U82SMv8ZQ,152
+schemathesis-4.2.2.dist-info/licenses/LICENSE,sha256=2Ve4J8v5jMQAWrT7r1nf3bI8Vflk3rZVQefiF2zpxwg,1121
+schemathesis-4.2.2.dist-info/RECORD,,