schemathesis 3.25.5__py3-none-any.whl → 3.39.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- schemathesis/__init__.py +6 -6
- schemathesis/_compat.py +2 -2
- schemathesis/_dependency_versions.py +4 -2
- schemathesis/_hypothesis.py +369 -56
- schemathesis/_lazy_import.py +1 -0
- schemathesis/_override.py +5 -4
- schemathesis/_patches.py +21 -0
- schemathesis/_rate_limiter.py +7 -0
- schemathesis/_xml.py +75 -22
- schemathesis/auths.py +78 -16
- schemathesis/checks.py +21 -9
- schemathesis/cli/__init__.py +793 -448
- schemathesis/cli/__main__.py +4 -0
- schemathesis/cli/callbacks.py +58 -13
- schemathesis/cli/cassettes.py +233 -47
- schemathesis/cli/constants.py +8 -2
- schemathesis/cli/context.py +24 -4
- schemathesis/cli/debug.py +2 -1
- schemathesis/cli/handlers.py +4 -1
- schemathesis/cli/junitxml.py +103 -22
- schemathesis/cli/options.py +15 -4
- schemathesis/cli/output/default.py +286 -115
- schemathesis/cli/output/short.py +25 -6
- schemathesis/cli/reporting.py +79 -0
- schemathesis/cli/sanitization.py +6 -0
- schemathesis/code_samples.py +5 -3
- schemathesis/constants.py +1 -0
- schemathesis/contrib/openapi/__init__.py +1 -1
- schemathesis/contrib/openapi/fill_missing_examples.py +3 -1
- schemathesis/contrib/openapi/formats/uuid.py +2 -1
- schemathesis/contrib/unique_data.py +3 -3
- schemathesis/exceptions.py +76 -65
- schemathesis/experimental/__init__.py +35 -0
- schemathesis/extra/_aiohttp.py +1 -0
- schemathesis/extra/_flask.py +4 -1
- schemathesis/extra/_server.py +1 -0
- schemathesis/extra/pytest_plugin.py +17 -25
- schemathesis/failures.py +77 -9
- schemathesis/filters.py +185 -8
- schemathesis/fixups/__init__.py +1 -0
- schemathesis/fixups/fast_api.py +2 -2
- schemathesis/fixups/utf8_bom.py +1 -2
- schemathesis/generation/__init__.py +20 -36
- schemathesis/generation/_hypothesis.py +59 -0
- schemathesis/generation/_methods.py +44 -0
- schemathesis/generation/coverage.py +931 -0
- schemathesis/graphql.py +0 -1
- schemathesis/hooks.py +89 -12
- schemathesis/internal/checks.py +84 -0
- schemathesis/internal/copy.py +22 -3
- schemathesis/internal/deprecation.py +6 -2
- schemathesis/internal/diff.py +15 -0
- schemathesis/internal/extensions.py +27 -0
- schemathesis/internal/jsonschema.py +2 -1
- schemathesis/internal/output.py +68 -0
- schemathesis/internal/result.py +1 -1
- schemathesis/internal/transformation.py +11 -0
- schemathesis/lazy.py +138 -25
- schemathesis/loaders.py +7 -5
- schemathesis/models.py +323 -213
- schemathesis/parameters.py +4 -0
- schemathesis/runner/__init__.py +72 -22
- schemathesis/runner/events.py +86 -6
- schemathesis/runner/impl/context.py +104 -0
- schemathesis/runner/impl/core.py +447 -187
- schemathesis/runner/impl/solo.py +19 -29
- schemathesis/runner/impl/threadpool.py +70 -79
- schemathesis/{cli → runner}/probes.py +37 -25
- schemathesis/runner/serialization.py +150 -17
- schemathesis/sanitization.py +5 -1
- schemathesis/schemas.py +170 -102
- schemathesis/serializers.py +17 -4
- schemathesis/service/ci.py +1 -0
- schemathesis/service/client.py +39 -6
- schemathesis/service/events.py +5 -1
- schemathesis/service/extensions.py +224 -0
- schemathesis/service/hosts.py +6 -2
- schemathesis/service/metadata.py +25 -0
- schemathesis/service/models.py +211 -2
- schemathesis/service/report.py +6 -6
- schemathesis/service/serialization.py +60 -71
- schemathesis/service/usage.py +1 -0
- schemathesis/specs/graphql/_cache.py +26 -0
- schemathesis/specs/graphql/loaders.py +25 -5
- schemathesis/specs/graphql/nodes.py +1 -0
- schemathesis/specs/graphql/scalars.py +2 -2
- schemathesis/specs/graphql/schemas.py +130 -100
- schemathesis/specs/graphql/validation.py +1 -2
- schemathesis/specs/openapi/__init__.py +1 -0
- schemathesis/specs/openapi/_cache.py +123 -0
- schemathesis/specs/openapi/_hypothesis.py +79 -61
- schemathesis/specs/openapi/checks.py +504 -25
- schemathesis/specs/openapi/converter.py +31 -4
- schemathesis/specs/openapi/definitions.py +10 -17
- schemathesis/specs/openapi/examples.py +143 -31
- schemathesis/specs/openapi/expressions/__init__.py +37 -2
- schemathesis/specs/openapi/expressions/context.py +1 -1
- schemathesis/specs/openapi/expressions/extractors.py +26 -0
- schemathesis/specs/openapi/expressions/lexer.py +20 -18
- schemathesis/specs/openapi/expressions/nodes.py +29 -6
- schemathesis/specs/openapi/expressions/parser.py +26 -5
- schemathesis/specs/openapi/formats.py +44 -0
- schemathesis/specs/openapi/links.py +125 -42
- schemathesis/specs/openapi/loaders.py +77 -36
- schemathesis/specs/openapi/media_types.py +34 -0
- schemathesis/specs/openapi/negative/__init__.py +6 -3
- schemathesis/specs/openapi/negative/mutations.py +21 -6
- schemathesis/specs/openapi/parameters.py +39 -25
- schemathesis/specs/openapi/patterns.py +137 -0
- schemathesis/specs/openapi/references.py +37 -7
- schemathesis/specs/openapi/schemas.py +368 -242
- schemathesis/specs/openapi/security.py +25 -7
- schemathesis/specs/openapi/serialization.py +1 -0
- schemathesis/specs/openapi/stateful/__init__.py +198 -70
- schemathesis/specs/openapi/stateful/statistic.py +198 -0
- schemathesis/specs/openapi/stateful/types.py +14 -0
- schemathesis/specs/openapi/utils.py +6 -1
- schemathesis/specs/openapi/validation.py +1 -0
- schemathesis/stateful/__init__.py +35 -21
- schemathesis/stateful/config.py +97 -0
- schemathesis/stateful/context.py +135 -0
- schemathesis/stateful/events.py +274 -0
- schemathesis/stateful/runner.py +309 -0
- schemathesis/stateful/sink.py +68 -0
- schemathesis/stateful/state_machine.py +67 -38
- schemathesis/stateful/statistic.py +22 -0
- schemathesis/stateful/validation.py +100 -0
- schemathesis/targets.py +33 -1
- schemathesis/throttling.py +25 -5
- schemathesis/transports/__init__.py +354 -0
- schemathesis/transports/asgi.py +7 -0
- schemathesis/transports/auth.py +25 -2
- schemathesis/transports/content_types.py +3 -1
- schemathesis/transports/headers.py +2 -1
- schemathesis/transports/responses.py +9 -4
- schemathesis/types.py +9 -0
- schemathesis/utils.py +11 -16
- schemathesis-3.39.7.dist-info/METADATA +293 -0
- schemathesis-3.39.7.dist-info/RECORD +160 -0
- {schemathesis-3.25.5.dist-info → schemathesis-3.39.7.dist-info}/WHEEL +1 -1
- schemathesis/specs/openapi/filters.py +0 -49
- schemathesis/specs/openapi/stateful/links.py +0 -92
- schemathesis-3.25.5.dist-info/METADATA +0 -356
- schemathesis-3.25.5.dist-info/RECORD +0 -134
- {schemathesis-3.25.5.dist-info → schemathesis-3.39.7.dist-info}/entry_points.txt +0 -0
- {schemathesis-3.25.5.dist-info → schemathesis-3.39.7.dist-info}/licenses/LICENSE +0 -0
schemathesis/runner/impl/core.py
CHANGED
@@ -1,14 +1,17 @@
 from __future__ import annotations
+
+import functools
 import logging
+import operator
 import re
 import threading
 import time
 import unittest
 import uuid
+import warnings
 from contextlib import contextmanager
 from dataclasses import dataclass, field
-from
-from typing import Any, Callable, Generator, Iterable, cast, TYPE_CHECKING, Literal
+from typing import TYPE_CHECKING, Any, Callable, Generator, Iterable, List, Literal, cast
 from warnings import WarningMessage, catch_warnings

 import hypothesis
@@ -16,53 +19,74 @@ import requests
 from _pytest.logging import LogCaptureHandler, catching_logs
 from hypothesis.errors import HypothesisException, InvalidArgument
 from hypothesis_jsonschema._canonicalise import HypothesisRefResolutionError
-from jsonschema.exceptions import
-from
+from jsonschema.exceptions import SchemaError as JsonSchemaError
+from jsonschema.exceptions import ValidationError
+from requests.structures import CaseInsensitiveDict
+from urllib3.exceptions import InsecureRequestWarning

-from ...
-from ... import failures, hooks
+from ... import experimental, failures, hooks
 from ..._compat import MultipleFailures
 from ..._hypothesis import (
-    has_unsatisfied_example_mark,
-    get_non_serializable_mark,
-    get_invalid_regex_mark,
     get_invalid_example_headers_mark,
+    get_invalid_regex_mark,
+    get_non_serializable_mark,
+    has_unsatisfied_example_mark,
 )
 from ...auths import unregister as unregister_auth
-from ...
+from ...checks import _make_max_response_time_failure_message
 from ...constants import (
     DEFAULT_STATEFUL_RECURSION_LIMIT,
     RECURSIVE_REFERENCE_ERROR_MESSAGE,
-    USER_AGENT,
     SERIALIZERS_SUGGESTION_MESSAGE,
+    USER_AGENT,
 )
 from ...exceptions import (
     CheckFailed,
     DeadlineExceeded,
+    InternalError,
+    InvalidHeadersExample,
     InvalidRegularExpression,
     NonCheckError,
     OperationSchemaError,
+    RecursiveReferenceError,
+    SerializationNotPossible,
     SkipTest,
+    format_exception,
     get_grouped_exception,
     maybe_set_assertion_message,
-    format_exception,
-    SerializationNotPossible,
-    InvalidHeadersExample,
 )
+from ...generation import DataGenerationMethod, GenerationConfig
 from ...hooks import HookContext, get_all_by_name
-from ...internal.
-from ...models import APIOperation, Case, Check, CheckFunction, Status, TestResult, TestResultSet
-from ...runner import events
+from ...internal.checks import CheckConfig, CheckContext
 from ...internal.datetime import current_datetime
-from ...
+from ...internal.result import Err, Ok, Result
+from ...models import APIOperation, Case, Check, Status, TestResult
+from ...runner import events
+from ...service import extensions
+from ...service.models import AnalysisResult, AnalysisSuccess
+from ...specs.openapi import formats
 from ...stateful import Feedback, Stateful
+from ...stateful import events as stateful_events
+from ...stateful import runner as stateful_runner
 from ...targets import Target, TargetContext
-from ...
+from ...transports import RequestConfig, RequestsTransport
+from ...transports.auth import get_requests_auth, prepare_wsgi_headers
 from ...utils import capture_hypothesis_output
+from .. import probes
 from ..serialization import SerializedTestResult
+from .context import RunnerContext

 if TYPE_CHECKING:
-    from
+    from types import TracebackType
+
+    from requests.auth import HTTPDigestAuth
+
+    from ..._override import CaseOverride
+    from ...internal.checks import CheckFunction
+    from ...schemas import BaseSchema
+    from ...service.client import ServiceClient
+    from ...transports.responses import GenericResponse, WSGIResponse
+    from ...types import RawAuth


 def _should_count_towards_stop(event: events.ExecutionEvent) -> bool:
@@ -76,23 +100,29 @@ class BaseRunner:
     max_response_time: int | None
     targets: Iterable[Target]
     hypothesis_settings: hypothesis.settings
-    generation_config: GenerationConfig
+    generation_config: GenerationConfig | None
+    probe_config: probes.ProbeConfig
+    checks_config: CheckConfig
+    request_config: RequestConfig = field(default_factory=RequestConfig)
     override: CaseOverride | None = None
     auth: RawAuth | None = None
     auth_type: str | None = None
     headers: dict[str, Any] | None = None
-    request_timeout: int | None = None
     store_interactions: bool = False
     seed: int | None = None
     exit_first: bool = False
+    no_failfast: bool = False
     max_failures: int | None = None
     started_at: str = field(default_factory=current_datetime)
+    unique_data: bool = False
     dry_run: bool = False
     stateful: Stateful | None = None
     stateful_recursion_limit: int = DEFAULT_STATEFUL_RECURSION_LIMIT
     count_operations: bool = True
     count_links: bool = True
+    service_client: ServiceClient | None = None
     _failures_counter: int = 0
+    _is_stopping_due_to_failure_limit: bool = False

     def execute(self) -> EventStream:
         """Common logic for all runners."""
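
Note: the per-request knobs that 3.25.5 kept as separate runner fields (`request_timeout` here, plus `request_tls_verify`, `request_proxy` and `request_cert` in the test functions further down) are folded into a single `request_config: RequestConfig` value, alongside the new `probe_config`, `checks_config`, `no_failfast`, `unique_data` and `service_client` fields. A minimal sketch of the idea; the dataclass below is a hypothetical stand-in that only mirrors the attributes this diff reads (`prepared_timeout`, `tls_verify`, `proxy`, `cert`), not the library's actual definition.

    from __future__ import annotations

    from dataclasses import dataclass


    @dataclass
    class RequestConfigSketch:
        timeout: int | None = None  # milliseconds, matching the removed `prepare_timeout` docstring
        tls_verify: bool = True
        proxy: str | None = None
        cert: str | None = None

        @property
        def prepared_timeout(self) -> float | None:
            # `requests` expects seconds; the removed `prepare_timeout` helper did the same division.
            return self.timeout / 1000 if self.timeout is not None else None


    config = RequestConfigSketch(timeout=2000, tls_verify=False)
    kwargs = {"timeout": config.prepared_timeout, "verify": config.tls_verify, "cert": config.cert}
    if config.proxy is not None:
        kwargs["proxies"] = {"all": config.proxy}
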
@@ -103,35 +133,106 @@ class BaseRunner:
         # If auth is explicitly provided, then the global provider is ignored
         if self.auth is not None:
             unregister_auth()
-
-
-
-
+        ctx = RunnerContext(
+            auth=self.auth,
+            seed=self.seed,
+            stop_event=stop_event,
+            unique_data=self.unique_data,
+            checks_config=self.checks_config,
+            override=self.override,
+            no_failfast=self.no_failfast,
         )
+        start_time = time.monotonic()
+        initialized = None
+        __probes = None
+        __analysis: Result[AnalysisResult, Exception] | None = None
+
+        def _initialize() -> events.Initialized:
+            nonlocal initialized
+            initialized = events.Initialized.from_schema(
+                schema=self.schema,
+                count_operations=self.count_operations,
+                count_links=self.count_links,
+                seed=ctx.seed,
+                start_time=start_time,
+            )
+            return initialized

         def _finish() -> events.Finished:
-            if has_all_not_found
-
-            return events.Finished.from_results(results=
+            if ctx.has_all_not_found:
+                ctx.add_warning(ALL_NOT_FOUND_WARNING_MESSAGE)
+            return events.Finished.from_results(results=ctx.data, running_time=time.monotonic() - start_time)

-
-
-
+        def _before_probes() -> events.BeforeProbing:
+            return events.BeforeProbing()
+
+        def _run_probes() -> None:
+            if not self.dry_run:
+                nonlocal __probes
+
+                __probes = run_probes(self.schema, self.probe_config)

-
+        def _after_probes() -> events.AfterProbing:
+            _probes = cast(List[probes.ProbeRun], __probes)
+            return events.AfterProbing(probes=_probes)

-
+        def _before_analysis() -> events.BeforeAnalysis:
+            return events.BeforeAnalysis()
+
+        def _run_analysis() -> None:
+            nonlocal __analysis, __probes
+
+            if self.service_client is not None:
+                try:
+                    _probes = cast(List[probes.ProbeRun], __probes)
+                    result = self.service_client.analyze_schema(_probes, self.schema.raw_schema)
+                    if isinstance(result, AnalysisSuccess):
+                        extensions.apply(result.extensions, self.schema)
+                    __analysis = Ok(result)
+                except Exception as exc:
+                    __analysis = Err(exc)
+
+        def _after_analysis() -> events.AfterAnalysis:
+            return events.AfterAnalysis(analysis=__analysis)
+
+        if ctx.is_stopped:
             yield _finish()
             return

+        for event_factory in (
+            _initialize,
+            _before_probes,
+            _run_probes,
+            _after_probes,
+            _before_analysis,
+            _run_analysis,
+            _after_analysis,
+        ):
+            event = event_factory()
+            if event is not None:
+                yield event
+            if ctx.is_stopped:
+                yield _finish()  # type: ignore[unreachable]
+                return
+
         try:
-
+            warnings.simplefilter("ignore", InsecureRequestWarning)
+            if not experimental.STATEFUL_ONLY.is_enabled:
+                yield from self._execute(ctx)
+            if not self._is_stopping_due_to_failure_limit:
+                yield from self._run_stateful_tests(ctx)
         except KeyboardInterrupt:
             yield events.Interrupted()

         yield _finish()

     def _should_stop(self, event: events.ExecutionEvent) -> bool:
+        result = self.__should_stop(event)
+        if result:
+            self._is_stopping_due_to_failure_limit = True
+        return result
+
+    def __should_stop(self, event: events.ExecutionEvent) -> bool:
         if _should_count_towards_stop(event):
             if self.exit_first:
                 return True
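
Note: `execute()` now drives a fixed pipeline of event factories (initialize → probing → schema analysis) and re-checks the shared stop flag after every step. A stripped-down sketch of that control flow, with plain objects standing in for Schemathesis events:

    import threading
    from typing import Callable, Iterator, Optional


    def run_pipeline(
        factories: list[Callable[[], Optional[object]]],
        stop_event: threading.Event,
        finish: Callable[[], object],
    ) -> Iterator[object]:
        # Mirrors the loop added to `BaseRunner.execute()`: run each step, yield its event
        # (steps such as `_run_probes` return None), and bail out early with a final event
        # if a stop was requested in between.
        for factory in factories:
            event = factory()
            if event is not None:
                yield event
            if stop_event.is_set():
                yield finish()
                return
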
@@ -140,19 +241,116 @@ class BaseRunner:
             return self._failures_counter >= self.max_failures
         return False

-    def _execute(
-        self, results: TestResultSet, stop_event: threading.Event
-    ) -> Generator[events.ExecutionEvent, None, None]:
+    def _execute(self, ctx: RunnerContext) -> Generator[events.ExecutionEvent, None, None]:
         raise NotImplementedError

+    def _run_stateful_tests(self, ctx: RunnerContext) -> Generator[events.ExecutionEvent, None, None]:
+        # Run new-style stateful tests
+        if self.stateful is not None and experimental.STATEFUL_TEST_RUNNER.is_enabled and self.schema.links_count > 0:
+            result = TestResult(
+                method="",
+                path="",
+                verbose_name="Stateful tests",
+                seed=ctx.seed,
+                data_generation_method=self.schema.data_generation_methods,
+            )
+            headers = self.headers or {}
+            if isinstance(self.schema.transport, RequestsTransport):
+                auth = get_requests_auth(self.auth, self.auth_type)
+            else:
+                auth = None
+                headers = prepare_wsgi_headers(headers, self.auth, self.auth_type)
+            config = stateful_runner.StatefulTestRunnerConfig(
+                checks=tuple(self.checks),
+                headers=headers,
+                hypothesis_settings=self.hypothesis_settings,
+                exit_first=self.exit_first,
+                max_failures=None if self.max_failures is None else self.max_failures - self._failures_counter,
+                request=self.request_config,
+                auth=auth,
+                seed=ctx.seed,
+                override=self.override,
+            )
+            state_machine = self.schema.as_state_machine()
+            runner = state_machine.runner(config=config)
+            status = Status.success
+
+            def from_step_status(step_status: stateful_events.StepStatus) -> Status:
+                return {
+                    stateful_events.StepStatus.SUCCESS: Status.success,
+                    stateful_events.StepStatus.FAILURE: Status.failure,
+                    stateful_events.StepStatus.ERROR: Status.error,
+                    stateful_events.StepStatus.INTERRUPTED: Status.error,
+                }[step_status]
+
+            if self.store_interactions:
+                if isinstance(state_machine.schema.transport, RequestsTransport):
+
+                    def on_step_finished(event: stateful_events.StepFinished) -> None:
+                        if event.response is not None and event.status is not None:
+                            response = cast(requests.Response, event.response)
+                            result.store_requests_response(
+                                status=from_step_status(event.status),
+                                case=event.case,
+                                response=response,
+                                checks=event.checks,
+                                headers=headers,
+                                session=None,
+                            )
+
+                else:
+
+                    def on_step_finished(event: stateful_events.StepFinished) -> None:
+                        from ...transports.responses import WSGIResponse
+
+                        if event.response is not None and event.status is not None:
+                            response = cast(WSGIResponse, event.response)
+                            result.store_wsgi_response(
+                                status=from_step_status(event.status),
+                                case=event.case,
+                                response=response,
+                                headers=headers,
+                                elapsed=response.elapsed.total_seconds(),
+                                checks=event.checks,
+                            )
+            else:
+
+                def on_step_finished(event: stateful_events.StepFinished) -> None:
+                    return None
+
+            test_start_time: float | None = None
+            test_elapsed_time: float | None = None
+
+            for stateful_event in runner.execute():
+                if isinstance(stateful_event, stateful_events.SuiteFinished):
+                    if stateful_event.failures and status != Status.error:
+                        status = Status.failure
+                elif isinstance(stateful_event, stateful_events.RunStarted):
+                    test_start_time = stateful_event.timestamp
+                elif isinstance(stateful_event, stateful_events.RunFinished):
+                    test_elapsed_time = stateful_event.timestamp - cast(float, test_start_time)
+                elif isinstance(stateful_event, stateful_events.StepFinished):
+                    result.checks.extend(stateful_event.checks)
+                    on_step_finished(stateful_event)
+                elif isinstance(stateful_event, stateful_events.Errored):
+                    status = Status.error
+                    result.add_error(stateful_event.exception)
+                yield events.StatefulEvent(data=stateful_event)
+            ctx.add_result(result)
+            yield events.AfterStatefulExecution(
+                status=status,
+                result=SerializedTestResult.from_test_result(result),
+                elapsed_time=cast(float, test_elapsed_time),
+                data_generation_method=self.schema.data_generation_methods,
+            )
+
     def _run_tests(
         self,
         maker: Callable,
-
+        test_func: Callable,
         settings: hypothesis.settings,
-        generation_config: GenerationConfig,
-
-        results: TestResultSet,
+        generation_config: GenerationConfig | None,
+        ctx: RunnerContext,
         recursion_level: int = 0,
         headers: dict[str, Any] | None = None,
         **kwargs: Any,
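
Note: `_run_stateful_tests` adapts events from the new experimental stateful test runner to the existing `Status`-based reporting; interrupted steps are treated as errors. A self-contained sketch of that mapping with stand-in enums (the real classes live in `schemathesis.stateful.events` and `schemathesis.models`):

    import enum


    class StepStatus(enum.Enum):  # stand-in for stateful_events.StepStatus
        SUCCESS = enum.auto()
        FAILURE = enum.auto()
        ERROR = enum.auto()
        INTERRUPTED = enum.auto()


    class Status(enum.Enum):  # stand-in for models.Status
        success = enum.auto()
        failure = enum.auto()
        error = enum.auto()


    def from_step_status(step_status: StepStatus) -> Status:
        # INTERRUPTED is reported as an error, exactly as in the hunk above.
        return {
            StepStatus.SUCCESS: Status.success,
            StepStatus.FAILURE: Status.failure,
            StepStatus.ERROR: Status.error,
            StepStatus.INTERRUPTED: Status.error,
        }[step_status]
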
@@ -172,15 +370,18 @@ class BaseRunner:
             return kw

         for result in maker(
-
+            test_func,
             settings=settings,
             generation_config=generation_config,
-            seed=seed,
+            seed=ctx.seed,
             as_strategy_kwargs=as_strategy_kwargs,
         ):
             if isinstance(result, Ok):
                 operation, test = result.ok()
-
+                if self.stateful is not None and not experimental.STATEFUL_TEST_RUNNER.is_enabled:
+                    feedback = Feedback(self.stateful, operation)
+                else:
+                    feedback = None
                 # Track whether `BeforeExecution` was already emitted.
                 # Schema error may happen before / after `BeforeExecution`, but it should be emitted only once
                 # and the `AfterExecution` event should have the same correlation id as previous `BeforeExecution`
@@ -189,7 +390,7 @@ class BaseRunner:
                     for event in run_test(
                         operation,
                         test,
-
+                        ctx=ctx,
                         feedback=feedback,
                         recursion_level=recursion_level,
                         data_generation_methods=self.schema.data_generation_methods,
@@ -202,30 +403,39 @@ class BaseRunner:
                         if isinstance(event, events.Interrupted):
                             return
                     # Additional tests, generated via the `feedback` instance
-
-
-
-
-
-
-
-
-
-
-
+                    if feedback is not None:
+                        yield from self._run_tests(
+                            feedback.get_stateful_tests,
+                            test_func,
+                            settings=settings,
+                            generation_config=generation_config,
+                            recursion_level=recursion_level + 1,
+                            ctx=ctx,
+                            headers=headers,
+                            **kwargs,
+                        )
                 except OperationSchemaError as exc:
                     yield from handle_schema_error(
                         exc,
-
+                        ctx,
                         self.schema.data_generation_methods,
                         recursion_level,
                         before_execution_correlation_id=before_execution_correlation_id,
                     )
             else:
                 # Schema errors
-                yield from handle_schema_error(
-
-
+                yield from handle_schema_error(result.err(), ctx, self.schema.data_generation_methods, recursion_level)
+
+
+def run_probes(schema: BaseSchema, config: probes.ProbeConfig) -> list[probes.ProbeRun]:
+    """Discover capabilities of the tested app."""
+    results = probes.run(schema, config)
+    for result in results:
+        if isinstance(result.probe, probes.NullByteInHeader) and result.is_failure:
+            from ...specs.openapi.formats import HEADER_FORMAT, header_values
+
+            formats.register(HEADER_FORMAT, header_values(blacklist_characters="\n\r\x00"))
+    return results


 @dataclass
@@ -259,7 +469,7 @@ class EventStream:

 def handle_schema_error(
     error: OperationSchemaError,
-
+    ctx: RunnerContext,
     data_generation_methods: Iterable[DataGenerationMethod],
     recursion_level: int,
     *,
@@ -304,11 +514,11 @@ def handle_schema_error(
             hypothesis_output=[],
             correlation_id=correlation_id,
         )
-
+        ctx.add_result(result)
     else:
         # When there is no `method`, then the schema error may cover multiple operations, and we can't display it in
         # the progress bar
-
+        ctx.add_generic_error(error)


 def run_test(
@@ -317,7 +527,7 @@ def run_test(
     checks: Iterable[CheckFunction],
     data_generation_methods: Iterable[DataGenerationMethod],
     targets: Iterable[Target],
-
+    ctx: RunnerContext,
     headers: dict[str, Any] | None,
     recursion_level: int,
     **kwargs: Any,
@@ -342,12 +552,35 @@ def run_test(
     errors: list[Exception] = []
     test_start_time = time.monotonic()
     setup_hypothesis_database_key(test, operation)
+
+    def _on_flaky(exc: Exception) -> Status:
+        if isinstance(exc.__cause__, hypothesis.errors.DeadlineExceeded):
+            status = Status.error
+            result.add_error(DeadlineExceeded.from_exc(exc.__cause__))
+        elif (
+            hasattr(hypothesis.errors, "FlakyFailure")
+            and isinstance(exc, hypothesis.errors.FlakyFailure)
+            and any(isinstance(subexc, hypothesis.errors.DeadlineExceeded) for subexc in exc.exceptions)
+        ):
+            for sub_exc in exc.exceptions:
+                if isinstance(sub_exc, hypothesis.errors.DeadlineExceeded):
+                    result.add_error(DeadlineExceeded.from_exc(sub_exc))
+            status = Status.error
+        elif errors:
+            status = Status.error
+            add_errors(result, errors)
+        else:
+            status = Status.failure
+            result.mark_flaky()
+        return status
+
     try:
         with catch_warnings(record=True) as warnings, capture_hypothesis_output() as hypothesis_output:
             test(
-
-
-
+                ctx=ctx,
+                checks=checks,
+                targets=targets,
+                result=result,
                 errors=errors,
                 headers=headers,
                 data_generation_methods=data_generation_methods,
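
Note: the new `_on_flaky` helper centralizes the handling of Hypothesis "flaky" outcomes that previously lived in a dedicated `except` branch (removed a few hunks below): deadline-related flakiness is reported as an error, anything else is marked as a flaky failure. The `hasattr(hypothesis.errors, "FlakyFailure")` guard exists because `FlakyFailure`, which carries a group of sub-exceptions, only appears in newer Hypothesis releases. A reduced sketch of the same decision with the result recording stubbed out:

    import hypothesis.errors


    def classify_flaky(exc: BaseException, pending_errors: list) -> str:
        # A deadline problem behind the flakiness is infrastructure noise -> "error".
        if isinstance(exc.__cause__, hypothesis.errors.DeadlineExceeded):
            return "error"
        # Newer Hypothesis raises FlakyFailure, a group of sub-exceptions.
        flaky_failure = getattr(hypothesis.errors, "FlakyFailure", None)
        if (
            flaky_failure is not None
            and isinstance(exc, flaky_failure)
            and any(isinstance(sub, hypothesis.errors.DeadlineExceeded) for sub in exc.exceptions)
        ):
            return "error"
        if pending_errors:
            return "error"
        # A genuinely flaky test failure.
        return "failure"
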
@@ -371,6 +604,8 @@ def run_test(
         result.mark_errored()
         for error in deduplicate_errors(errors):
             result.add_error(error)
+    except hypothesis.errors.Flaky as exc:
+        status = _on_flaky(exc)
     except MultipleFailures:
         # Schemathesis may detect multiple errors that come from different check results
         # They raise different "grouped" exceptions
@@ -379,16 +614,6 @@ def run_test(
             add_errors(result, errors)
         else:
             status = Status.failure
-    except hypothesis.errors.Flaky as exc:
-        if isinstance(exc.__cause__, hypothesis.errors.DeadlineExceeded):
-            status = Status.error
-            result.add_error(DeadlineExceeded.from_exc(exc.__cause__))
-        elif errors:
-            status = Status.error
-            add_errors(result, errors)
-        else:
-            status = Status.failure
-            result.mark_flaky()
     except hypothesis.errors.Unsatisfiable:
         # We need more clear error message here
         status = Status.error
@@ -399,13 +624,29 @@ def run_test(
     except SkipTest as exc:
         status = Status.skip
         result.mark_skipped(exc)
-    except AssertionError: #
-        error = reraise(operation)
+    except AssertionError as exc:  # May come from `hypothesis-jsonschema` or `hypothesis`
         status = Status.error
+        try:
+            operation.schema.validate()
+            msg = "Unexpected error during testing of this API operation"
+            exc_msg = str(exc)
+            if exc_msg:
+                msg += f": {exc_msg}"
+            try:
+                raise InternalError(msg) from exc
+            except InternalError as exc:
+                error = exc
+        except ValidationError as exc:
+            error = OperationSchemaError.from_jsonschema_error(
+                exc,
+                path=operation.path,
+                method=operation.method,
+                full_path=operation.schema.get_full_path(operation.path),
+            )
         result.add_error(error)
     except HypothesisRefResolutionError:
         status = Status.error
-        result.add_error(
+        result.add_error(RecursiveReferenceError(RECURSIVE_REFERENCE_ERROR_MESSAGE))
     except InvalidArgument as error:
         status = Status.error
         message = get_invalid_regular_expression_message(warnings)
@@ -433,6 +674,8 @@ def run_test(
         )
     else:
         result.add_error(error)
+    if status == Status.success and ctx.no_failfast and any(check.value == Status.failure for check in result.checks):
+        status = Status.failure
     if has_unsatisfied_example_mark(test):
         status = Status.error
         result.add_error(
@@ -464,10 +707,10 @@ def run_test(
     result.seed = getattr(test, "_hypothesis_internal_use_seed", None) or getattr(
         test, "_hypothesis_internal_use_generated_seed", None
     )
-
+    ctx.add_result(result)
     for status_code in (401, 403):
         if has_too_many_responses_with_status(result, status_code):
-
+            ctx.add_warning(TOO_MANY_RESPONSES_WARNING_TEMPLATE.format(f"`{operation.verbose_name}`", status_code))
     yield events.AfterExecution.from_result(
         result=result,
         status=status,
@@ -502,22 +745,6 @@ def has_too_many_responses_with_status(result: TestResult, status_code: int) ->
 ALL_NOT_FOUND_WARNING_MESSAGE = "All API responses have a 404 status code. Did you specify the proper API location?"


-def has_all_not_found(results: TestResultSet) -> bool:
-    """Check if all responses are 404."""
-    has_not_found = False
-    for result in results.results:
-        for check in result.checks:
-            if check.response is not None:
-                if check.response.status_code == 404:
-                    has_not_found = True
-                else:
-                    # There are non-404 responses, no reason to check any other response
-                    return False
-    # Only happens if all responses are 404, or there are no responses at all.
-    # In the first case, it returns True, for the latter - False
-    return has_not_found
-
-
 def setup_hypothesis_database_key(test: Callable, operation: APIOperation) -> None:
     """Make Hypothesis use separate database entries for every API operation.

@@ -526,7 +753,7 @@ def setup_hypothesis_database_key(test: Callable, operation: APIOperation) -> No
     # Hypothesis's function digest depends on the test function signature. To reflect it for the web API case,
     # we use all API operation parameters in the digest.
     extra = operation.verbose_name.encode("utf8")
-    for parameter in operation.
+    for parameter in operation.iter_parameters():
         extra += parameter.serialize(operation).encode("utf8")
     test.hypothesis.inner_test._hypothesis_internal_add_digest = extra  # type: ignore

@@ -539,16 +766,6 @@ def get_invalid_regular_expression_message(warnings: list[WarningMessage]) -> st
     return None


-def reraise(operation: APIOperation) -> OperationSchemaError:
-    try:
-        operation.schema.validate()
-    except ValidationError as exc:
-        return OperationSchemaError.from_jsonschema_error(
-            exc, path=operation.path, method=operation.method, full_path=operation.schema.get_full_path(operation.path)
-        )
-    return OperationSchemaError("Unknown schema error")
-
-
 MEMORY_ADDRESS_RE = re.compile("0x[0-9a-fA-F]+")
 URL_IN_ERROR_MESSAGE_RE = re.compile(r"Max retries exceeded with url: .*? \(Caused by")

@@ -564,7 +781,9 @@ def group_errors(errors: list[Exception]) -> None:
     serialization_errors = [error for error in errors if isinstance(error, SerializationNotPossible)]
     if len(serialization_errors) > 1:
         errors[:] = [error for error in errors if not isinstance(error, SerializationNotPossible)]
-        media_types =
+        media_types: list[str] = functools.reduce(
+            operator.iadd, (entry.media_types for entry in serialization_errors), []
+        )
         errors.append(SerializationNotPossible.from_media_types(*media_types))

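
Note: the truncated `media_types = ...` assignment is replaced with an explicit flattening of every error's `media_types` list via `functools.reduce(operator.iadd, ...)`. The same flattening on plain lists, for reference:

    import functools
    import operator

    groups = [["application/xml"], ["text/csv", "application/pdf"]]
    # `iadd` extends the accumulator in place, so start from a fresh list and copy each group.
    flat = functools.reduce(operator.iadd, (list(group) for group in groups), [])
    assert flat == ["application/xml", "text/csv", "application/pdf"]
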
@@ -589,12 +808,14 @@ def deduplicate_errors(errors: list[Exception]) -> Generator[Exception, None, No
 def run_checks(
     *,
     case: Case,
+    ctx: CheckContext,
     checks: Iterable[CheckFunction],
     check_results: list[Check],
     result: TestResult,
     response: GenericResponse,
     elapsed_time: float,
     max_response_time: int | None = None,
+    no_failfast: bool,
 ) -> None:
     errors = []

@@ -611,7 +832,7 @@ def run_checks(
         check_name = check.__name__
         copied_case = case.partial_deepcopy()
         try:
-            skip_check = check(response, copied_case)
+            skip_check = check(ctx, response, copied_case)
             if not skip_check:
                 check_result = result.add_success(check_name, copied_case, response, elapsed_time)
                 check_results.append(check_result)
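
Note: `run_checks` now receives a `CheckContext` and calls every check as `check(ctx, response, case)` instead of `check(response, case)`. A hedged sketch of a check written against the new calling convention; the `ctx.headers` attribute used here is the one populated by the `CheckContext(...)` constructions later in this diff, not a documented public API:

    def my_check(ctx, response, case):
        # Returning a truthy value means "skip this check" (see `skip_check` above);
        # raising AssertionError reports a failure.
        if ctx.headers and "X-Skip-Checks" in ctx.headers:
            return True
        assert response.status_code < 500, "Server error"
        return None
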
@@ -623,7 +844,7 @@ def run_checks(

     if max_response_time:
         if elapsed_time > max_response_time:
-            message =
+            message = _make_max_response_time_failure_message(elapsed_time, max_response_time)
             errors.append(AssertionError(message))
             result.add_failure(
                 "max_response_time",
@@ -636,7 +857,7 @@ def run_checks(
         else:
             result.add_success("max_response_time", case, response, elapsed_time)

-    if errors:
+    if errors and not no_failfast:
         raise get_grouped_exception(case.operation.verbose_name, *errors)(causes=tuple(errors))

@@ -697,19 +918,42 @@ def _force_data_generation_method(values: list[DataGenerationMethod], case: Case
         values[:] = [data_generation_method]


+def cached_test_func(f: Callable) -> Callable:
+    def wrapped(*, ctx: RunnerContext, case: Case, **kwargs: Any) -> None:
+        if ctx.unique_data:
+            cached = ctx.get_cached_outcome(case)
+            if isinstance(cached, BaseException):
+                raise cached
+            elif cached is None:
+                return None
+            try:
+                f(ctx=ctx, case=case, **kwargs)
+            except BaseException as exc:
+                ctx.cache_outcome(case, exc)
+                raise
+            else:
+                ctx.cache_outcome(case, None)
+        else:
+            f(ctx=ctx, case=case, **kwargs)
+
+    wrapped.__name__ = f.__name__
+
+    return wrapped
+
+
+@cached_test_func
 def network_test(
+    *,
+    ctx: RunnerContext,
     case: Case,
     checks: Iterable[CheckFunction],
     targets: Iterable[Target],
     result: TestResult,
     session: requests.Session,
-
-    request_tls_verify: bool,
-    request_proxy: str | None,
-    request_cert: RequestCert | None,
+    request_config: RequestConfig,
     store_interactions: bool,
     headers: dict[str, Any] | None,
-    feedback: Feedback,
+    feedback: Feedback | None,
     max_response_time: int | None,
     data_generation_methods: list[DataGenerationMethod],
     dry_run: bool,
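
Note: `cached_test_func` backs the new `unique_data` runner option: when it is enabled, the outcome of each generated case is cached on the runner context, so a case that Hypothesis generates again is replayed from the cache instead of producing another API call. A self-contained sketch of the pattern, with a plain dict standing in for `RunnerContext`'s cache:

    from typing import Any, Callable


    def cached_outcomes(func: Callable[..., None]) -> Callable[..., None]:
        cache: dict[Any, BaseException | None] = {}

        def wrapped(case_key: Any, **kwargs: Any) -> None:
            # `case_key` is any hashable identity of the generated case.
            if case_key in cache:
                cached = cache[case_key]
                if cached is not None:
                    raise cached  # replay the previously recorded failure
                return  # previously passed - nothing to do
            try:
                func(case_key, **kwargs)
            except BaseException as exc:
                cache[case_key] = exc
                raise
            else:
                cache[case_key] = None

        return wrapped
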
@@ -722,85 +966,97 @@ def network_test(
         headers = headers or {}
         if "user-agent" not in {header.lower() for header in headers}:
             headers["User-Agent"] = USER_AGENT
-        timeout = prepare_timeout(request_timeout)
         if not dry_run:
             args = (
+                ctx,
                 checks,
                 targets,
                 result,
                 session,
-
+                request_config,
                 store_interactions,
                 headers,
                 feedback,
-                request_tls_verify,
-                request_proxy,
-                request_cert,
                 max_response_time,
             )
             response = _network_test(case, *args)
             add_cases(case, response, _network_test, *args)
+        elif store_interactions:
+            result.store_requests_response(case, None, Status.skip, [], headers=headers, session=session)


 def _network_test(
     case: Case,
+    ctx: RunnerContext,
     checks: Iterable[CheckFunction],
     targets: Iterable[Target],
     result: TestResult,
     session: requests.Session,
-
+    request_config: RequestConfig,
     store_interactions: bool,
     headers: dict[str, Any] | None,
-    feedback: Feedback,
-    request_tls_verify: bool,
-    request_proxy: str | None,
-    request_cert: RequestCert | None,
+    feedback: Feedback | None,
     max_response_time: int | None,
 ) -> requests.Response:
     check_results: list[Check] = []
+    hook_context = HookContext(operation=case.operation)
+    kwargs: dict[str, Any] = {
+        "session": session,
+        "headers": headers,
+        "timeout": request_config.prepared_timeout,
+        "verify": request_config.tls_verify,
+        "cert": request_config.cert,
+    }
+    if request_config.proxy is not None:
+        kwargs["proxies"] = {"all": request_config.proxy}
+    hooks.dispatch("process_call_kwargs", hook_context, case, kwargs)
     try:
-        hook_context = HookContext(operation=case.operation)
-        kwargs: dict[str, Any] = {
-            "session": session,
-            "headers": headers,
-            "timeout": timeout,
-            "verify": request_tls_verify,
-            "cert": request_cert,
-        }
-        if request_proxy is not None:
-            kwargs["proxies"] = {"all": request_proxy}
-        hooks.dispatch("process_call_kwargs", hook_context, case, kwargs)
         response = case.call(**kwargs)
     except CheckFailed as exc:
         check_name = "request_timeout"
-        requests_kwargs =
+        requests_kwargs = RequestsTransport().serialize_case(case, base_url=case.get_full_base_url(), headers=headers)
         request = requests.Request(**requests_kwargs).prepare()
-        elapsed = cast(
+        elapsed = cast(
+            float, request_config.prepared_timeout
+        )  # It is defined and not empty, since the exception happened
         check_result = result.add_failure(
             check_name, case, None, elapsed, f"Response timed out after {1000 * elapsed:.2f}ms", exc.context, request
         )
         check_results.append(check_result)
+        if store_interactions:
+            result.store_requests_response(case, None, Status.failure, [check_result], headers=headers, session=session)
         raise exc
     context = TargetContext(case=case, response=response, response_time=response.elapsed.total_seconds())
     run_targets(targets, context)
     status = Status.success
+
+    check_ctx = CheckContext(
+        override=ctx.override,
+        auth=ctx.auth,
+        headers=CaseInsensitiveDict(headers) if headers else None,
+        config=ctx.checks_config,
+        transport_kwargs=kwargs,
+    )
     try:
         run_checks(
             case=case,
+            ctx=check_ctx,
             checks=checks,
             check_results=check_results,
             result=result,
             response=response,
             elapsed_time=context.response_time * 1000,
             max_response_time=max_response_time,
+            no_failfast=ctx.no_failfast,
         )
     except CheckFailed:
         status = Status.failure
         raise
     finally:
-        feedback
+        if feedback is not None:
+            feedback.add_test_case(case, response)
         if store_interactions:
-            result.store_requests_response(case, response, status, check_results)
+            result.store_requests_response(case, response, status, check_results, headers=headers, session=session)
     return response

@@ -812,15 +1068,9 @@ def get_session(auth: HTTPDigestAuth | RawAuth | None = None) -> Generator[reque
         yield session


-
-    """Request timeout is in milliseconds, but `requests` uses seconds."""
-    output: int | float | None = timeout
-    if timeout is not None:
-        output = timeout / 1000
-    return output
-
-
+@cached_test_func
 def wsgi_test(
+    ctx: RunnerContext,
     case: Case,
     checks: Iterable[CheckFunction],
     targets: Iterable[Target],
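
Note: the standalone `prepare_timeout` helper is gone; its millisecond-to-second conversion now sits behind `request_config.prepared_timeout`, which `_network_test` above reads directly. The conversion itself, for reference:

    def prepare_timeout(timeout: int | None) -> float | None:
        # Schemathesis accepts the timeout in milliseconds; `requests` wants seconds.
        return timeout / 1000 if timeout is not None else None

    assert prepare_timeout(500) == 0.5
    assert prepare_timeout(None) is None
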
@@ -829,7 +1079,7 @@ def wsgi_test(
     auth_type: str | None,
     headers: dict[str, Any] | None,
     store_interactions: bool,
-    feedback: Feedback,
+    feedback: Feedback | None,
     max_response_time: int | None,
     data_generation_methods: list[DataGenerationMethod],
     dry_run: bool,
@@ -838,9 +1088,10 @@ def wsgi_test(
     with ErrorCollector(errors):
         _force_data_generation_method(data_generation_methods, case)
         result.mark_executed()
-        headers =
+        headers = prepare_wsgi_headers(headers, auth, auth_type)
         if not dry_run:
             args = (
+                ctx,
                 checks,
                 targets,
                 result,
@@ -851,78 +1102,73 @@ def wsgi_test(
             )
             response = _wsgi_test(case, *args)
             add_cases(case, response, _wsgi_test, *args)
+        elif store_interactions:
+            result.store_wsgi_response(case, None, headers, None, Status.skip, [])


 def _wsgi_test(
     case: Case,
+    ctx: RunnerContext,
     checks: Iterable[CheckFunction],
     targets: Iterable[Target],
     result: TestResult,
     headers: dict[str, Any],
     store_interactions: bool,
-    feedback: Feedback,
+    feedback: Feedback | None,
     max_response_time: int | None,
 ) -> WSGIResponse:
+    from ...transports.responses import WSGIResponse
+
     with catching_logs(LogCaptureHandler(), level=logging.DEBUG) as recorded:
-        start = time.monotonic()
         hook_context = HookContext(operation=case.operation)
-        kwargs = {"headers": headers}
+        kwargs: dict[str, Any] = {"headers": headers}
         hooks.dispatch("process_call_kwargs", hook_context, case, kwargs)
-        response = case.
-
-        context = TargetContext(case=case, response=response, response_time=elapsed)
+        response = cast(WSGIResponse, case.call(**kwargs))
+        context = TargetContext(case=case, response=response, response_time=response.elapsed.total_seconds())
         run_targets(targets, context)
     result.logs.extend(recorded.records)
     status = Status.success
     check_results: list[Check] = []
+    check_ctx = CheckContext(
+        override=ctx.override,
+        auth=ctx.auth,
+        headers=CaseInsensitiveDict(headers) if headers else None,
+        config=ctx.checks_config,
+        transport_kwargs=kwargs,
+    )
     try:
         run_checks(
             case=case,
+            ctx=check_ctx,
             checks=checks,
             check_results=check_results,
             result=result,
             response=response,
             elapsed_time=context.response_time * 1000,
             max_response_time=max_response_time,
+            no_failfast=ctx.no_failfast,
         )
     except CheckFailed:
         status = Status.failure
         raise
     finally:
-        feedback
+        if feedback is not None:
+            feedback.add_test_case(case, response)
         if store_interactions:
-            result.store_wsgi_response(case, response, headers, elapsed, status, check_results)
+            result.store_wsgi_response(case, response, headers, response.elapsed.total_seconds(), status, check_results)
     return response


-
-    headers: dict[str, Any] | None, auth: RawAuth | None, auth_type: str | None
-) -> dict[str, Any]:
-    headers = headers or {}
-    if "user-agent" not in {header.lower() for header in headers}:
-        headers["User-Agent"] = USER_AGENT
-    wsgi_auth = get_wsgi_auth(auth, auth_type)
-    if wsgi_auth:
-        headers["Authorization"] = wsgi_auth
-    return headers
-
-
-def get_wsgi_auth(auth: RawAuth | None, auth_type: str | None) -> str | None:
-    if auth:
-        if auth_type == "digest":
-            raise ValueError("Digest auth is not supported for WSGI apps")
-        return _basic_auth_str(*auth)
-    return None
-
-
+@cached_test_func
 def asgi_test(
+    ctx: RunnerContext,
     case: Case,
     checks: Iterable[CheckFunction],
     targets: Iterable[Target],
     result: TestResult,
     store_interactions: bool,
     headers: dict[str, Any] | None,
-    feedback: Feedback,
+    feedback: Feedback | None,
     max_response_time: int | None,
     data_generation_methods: list[DataGenerationMethod],
     dry_run: bool,
@@ -936,6 +1182,7 @@ def asgi_test(

         if not dry_run:
             args = (
+                ctx,
                 checks,
                 targets,
                 result,
@@ -946,41 +1193,54 @@ def asgi_test(
             )
             response = _asgi_test(case, *args)
             add_cases(case, response, _asgi_test, *args)
+        elif store_interactions:
+            result.store_requests_response(case, None, Status.skip, [], headers=headers, session=None)


 def _asgi_test(
     case: Case,
+    ctx: RunnerContext,
     checks: Iterable[CheckFunction],
     targets: Iterable[Target],
     result: TestResult,
     store_interactions: bool,
     headers: dict[str, Any] | None,
-    feedback: Feedback,
+    feedback: Feedback | None,
     max_response_time: int | None,
 ) -> requests.Response:
     hook_context = HookContext(operation=case.operation)
     kwargs: dict[str, Any] = {"headers": headers}
     hooks.dispatch("process_call_kwargs", hook_context, case, kwargs)
-    response = case.
+    response = case.call(**kwargs)
     context = TargetContext(case=case, response=response, response_time=response.elapsed.total_seconds())
     run_targets(targets, context)
     status = Status.success
     check_results: list[Check] = []
+    check_ctx = CheckContext(
+        override=ctx.override,
+        auth=ctx.auth,
+        headers=CaseInsensitiveDict(headers) if headers else None,
+        config=ctx.checks_config,
+        transport_kwargs=kwargs,
+    )
     try:
         run_checks(
             case=case,
+            ctx=check_ctx,
             checks=checks,
             check_results=check_results,
             result=result,
             response=response,
             elapsed_time=context.response_time * 1000,
             max_response_time=max_response_time,
+            no_failfast=ctx.no_failfast,
         )
     except CheckFailed:
         status = Status.failure
         raise
     finally:
-        feedback
+        if feedback is not None:
+            feedback.add_test_case(case, response)
         if store_interactions:
-            result.store_requests_response(case, response, status, check_results)
+            result.store_requests_response(case, response, status, check_results, headers, session=None)
     return response