schemathesis 4.0.0a4__py3-none-any.whl → 4.0.0a6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- schemathesis/cli/commands/run/__init__.py +15 -45
- schemathesis/cli/commands/run/checks.py +2 -3
- schemathesis/cli/commands/run/context.py +30 -17
- schemathesis/cli/commands/run/executor.py +1 -0
- schemathesis/cli/commands/run/handlers/output.py +168 -88
- schemathesis/cli/commands/run/hypothesis.py +7 -45
- schemathesis/core/__init__.py +7 -1
- schemathesis/engine/config.py +2 -2
- schemathesis/engine/core.py +11 -1
- schemathesis/engine/events.py +7 -0
- schemathesis/engine/phases/__init__.py +16 -4
- schemathesis/engine/phases/unit/__init__.py +77 -52
- schemathesis/engine/phases/unit/_executor.py +14 -12
- schemathesis/engine/phases/unit/_pool.py +8 -0
- schemathesis/experimental/__init__.py +0 -6
- schemathesis/generation/hypothesis/builder.py +222 -97
- schemathesis/openapi/checks.py +3 -1
- schemathesis/pytest/lazy.py +41 -2
- schemathesis/pytest/plugin.py +2 -1
- schemathesis/specs/openapi/checks.py +1 -1
- schemathesis/specs/openapi/examples.py +39 -25
- schemathesis/specs/openapi/patterns.py +39 -7
- schemathesis/specs/openapi/serialization.py +14 -0
- schemathesis/transport/requests.py +10 -1
- {schemathesis-4.0.0a4.dist-info → schemathesis-4.0.0a6.dist-info}/METADATA +47 -91
- {schemathesis-4.0.0a4.dist-info → schemathesis-4.0.0a6.dist-info}/RECORD +29 -29
- {schemathesis-4.0.0a4.dist-info → schemathesis-4.0.0a6.dist-info}/licenses/LICENSE +1 -1
- {schemathesis-4.0.0a4.dist-info → schemathesis-4.0.0a6.dist-info}/WHEEL +0 -0
- {schemathesis-4.0.0a4.dist-info → schemathesis-4.0.0a6.dist-info}/entry_points.txt +0 -0
schemathesis/cli/commands/run/handlers/output.py
CHANGED
@@ -22,11 +22,13 @@ from schemathesis.core.output import prepare_response_payload
 from schemathesis.core.result import Err, Ok
 from schemathesis.core.version import SCHEMATHESIS_VERSION
 from schemathesis.engine import Status, events
+from schemathesis.engine.config import EngineConfig
 from schemathesis.engine.errors import EngineErrorInfo
 from schemathesis.engine.phases import PhaseName, PhaseSkipReason
 from schemathesis.engine.phases.probes import ProbeOutcome
-from schemathesis.engine.recorder import Interaction
+from schemathesis.engine.recorder import Interaction, ScenarioRecorder
 from schemathesis.experimental import GLOBAL_EXPERIMENTS
+from schemathesis.generation.modes import GenerationMode
 from schemathesis.schemas import ApiStatistic

 if TYPE_CHECKING:
@@ -323,7 +325,8 @@ class ProbingProgressManager:

 @dataclass
 class WarningData:
-    missing_auth: dict[int,
+    missing_auth: dict[int, set[str]] = field(default_factory=dict)
+    only_4xx_responses: set[str] = field(default_factory=set)  # operations that only returned 4xx


 @dataclass
@@ -459,9 +462,10 @@ class UnitTestProgressManager:
         if self.stats[Status.FAILURE]:
             parts.append(f"❌ {self.stats[Status.FAILURE]:{width}d} failed")
         if self.stats[Status.ERROR]:
-
+            suffix = "s" if self.stats[Status.ERROR] > 1 else ""
+            parts.append(f"🚫 {self.stats[Status.ERROR]:{width}d} error{suffix}")
         if self.stats[Status.SKIP] or self.stats[Status.INTERRUPTED]:
-            parts.append(f"
+            parts.append(f"⏭ {self.stats[Status.SKIP] + self.stats[Status.INTERRUPTED]:{width}d} skipped")
         return " ".join(parts)

     def _update_stats_display(self) -> None:
@@ -513,7 +517,11 @@ class UnitTestProgressManager:
         if operation := self.current_operations.pop(label, None):
             if not self.current_operations:
                 assert self.title_task_id is not None
-                self.
+                if self.current == self.total - 1:
+                    description = f" {self.title}"
+                else:
+                    description = self.title
+                self.title_progress.update(self.title_task_id, description=description)
             self.operations_progress.update(operation.task_id, visible=False)

     def update_stats(self, status: Status) -> None:
@@ -542,7 +550,7 @@ class UnitTestProgressManager:
         elif self.stats[Status.SUCCESS] > 0:
             icon = "✅"
         elif self.stats[Status.SKIP] > 0:
-            icon = "
+            icon = "⏭ "
         else:
             icon = default_icon
         return icon
@@ -661,7 +669,7 @@ class StatefulProgressManager:
         from rich.text import Text

         # Initialize progress displays
-        self.title_task_id = self.title_progress.add_task("Stateful
+        self.title_task_id = self.title_progress.add_task("Stateful")
         self.progress_task_id = self.progress_bar.add_task(
             "", scenarios=0, links=f"0 covered / {self.links_selected} selected / {self.links_total} total links"
         )
@@ -710,9 +718,10 @@ class StatefulProgressManager:
         if self.stats[Status.FAILURE]:
             parts.append(f"❌ {self.stats[Status.FAILURE]} failed")
         if self.stats[Status.ERROR]:
-
+            suffix = "s" if self.stats[Status.ERROR] > 1 else ""
+            parts.append(f"🚫 {self.stats[Status.ERROR]} error{suffix}")
         if self.stats[Status.SKIP]:
-            parts.append(f"
+            parts.append(f"⏭ {self.stats[Status.SKIP]} skipped")
         return " ".join(parts)

     def _update_stats_display(self) -> None:
@@ -729,7 +738,7 @@ class StatefulProgressManager:
         elif self.stats[Status.SUCCESS] > 0:
             icon = "✅"
         elif self.stats[Status.SKIP] > 0:
-            icon = "
+            icon = "⏭ "
         else:
             icon = default_icon
         return icon
@@ -755,39 +764,18 @@ class StatefulProgressManager:


 def format_duration(duration_ms: int) -> str:
-    """Format duration in milliseconds to
-
-
-    # Convert to components
-    ms = duration_ms % 1000
-    seconds = (duration_ms // 1000) % 60
-    minutes = (duration_ms // (1000 * 60)) % 60
-    hours = duration_ms // (1000 * 60 * 60)
-
-    # Add non-empty components
-    if hours > 0:
-        parts.append(f"{hours} h")
-    if minutes > 0:
-        parts.append(f"{minutes} m")
-    if seconds > 0:
-        parts.append(f"{seconds} s")
-    if ms > 0:
-        parts.append(f"{ms} ms")
-
-    # Handle zero duration
-    if not parts:
-        return "0 ms"
-
-    return " ".join(parts)
+    """Format duration in milliseconds to seconds with 2 decimal places."""
+    return f"{duration_ms / 1000:.2f}s"


 @dataclass
 class OutputHandler(EventHandler):
     workers_num: int
-    # Seed can
+    # Seed can be absent in the deterministic mode
     seed: int | None
     rate_limit: str | None
     wait_for_schema: float | None
+    engine_config: EngineConfig

     loading_manager: LoadingProgressManager | None = None
     probing_manager: ProbingProgressManager | None = None
@@ -798,9 +786,9 @@ class OutputHandler(EventHandler):
     skip_reasons: list[str] = field(default_factory=list)
     report_config: ReportConfig | None = None
     warnings: WarningData = field(default_factory=WarningData)
-    errors:
+    errors: set[events.NonFatalError] = field(default_factory=set)
     phases: dict[PhaseName, tuple[Status, PhaseSkipReason | None]] = field(
-        default_factory=lambda:
+        default_factory=lambda: dict.fromkeys(PhaseName, (Status.SKIP, None))
     )
     console: Console = field(default_factory=_default_console)

@@ -820,7 +808,7 @@ class OutputHandler(EventHandler):
         elif isinstance(event, events.FatalError):
             self._on_fatal_error(ctx, event)
         elif isinstance(event, events.NonFatalError):
-            self.errors.
+            self.errors.add(event)
         elif isinstance(event, LoadingStarted):
             self._on_loading_started(event)
         elif isinstance(event, LoadingFinished):
@@ -885,8 +873,8 @@ class OutputHandler(EventHandler):
         phase = event.phase
         if phase.name == PhaseName.PROBING and phase.is_enabled:
             self._start_probing()
-        elif phase.name
-            self._start_unit_tests()
+        elif phase.name in [PhaseName.EXAMPLES, PhaseName.COVERAGE, PhaseName.FUZZING] and phase.is_enabled:
+            self._start_unit_tests(phase.name)
         elif phase.name == PhaseName.STATEFUL_TESTING and phase.is_enabled and phase.skip_reason is None:
             self._start_stateful_tests()

@@ -894,11 +882,12 @@ class OutputHandler(EventHandler):
         self.probing_manager = ProbingProgressManager(console=self.console)
         self.probing_manager.start()

-    def _start_unit_tests(self) -> None:
+    def _start_unit_tests(self, phase: PhaseName) -> None:
         assert self.statistic is not None
+        assert self.unit_tests_manager is None
         self.unit_tests_manager = UnitTestProgressManager(
             console=self.console,
-            title=
+            title=phase.value,
             total=self.statistic.operations.selected,
         )
         self.unit_tests_manager.start()
@@ -907,7 +896,7 @@ class OutputHandler(EventHandler):
         assert self.statistic is not None
         self.stateful_tests_manager = StatefulProgressManager(
             console=self.console,
-            title="Stateful
+            title="Stateful",
             links_selected=self.statistic.links.selected,
             links_total=self.statistic.links.total,
         )
@@ -963,7 +952,7 @@ class OutputHandler(EventHandler):
         elif event.status == Status.SKIP:
             message = Padding(
                 Text.assemble(
-                    ("
+                    ("⏭ ", ""),
                     ("API probing skipped", Style(color="yellow")),
                 ),
                 BLOCK_PADDING,
@@ -981,8 +970,7 @@ class OutputHandler(EventHandler):
             )
             self.console.print(message)
             self.console.print()
-        elif phase.name == PhaseName.STATEFUL_TESTING and phase.is_enabled:
-            assert self.stateful_tests_manager is not None
+        elif phase.name == PhaseName.STATEFUL_TESTING and phase.is_enabled and self.stateful_tests_manager is not None:
             self.stateful_tests_manager.stop()
             if event.status == Status.ERROR:
                 title, summary = self.stateful_tests_manager.get_completion_message("🚫")
@@ -1011,27 +999,29 @@ class OutputHandler(EventHandler):
             self.console.print(Padding(Text(summary, style="bright_white"), (0, 0, 0, 5)))
             self.console.print()
             self.stateful_tests_manager = None
-        elif
-
+        elif (
+            phase.name in [PhaseName.EXAMPLES, PhaseName.COVERAGE, PhaseName.FUZZING]
+            and phase.is_enabled
+            and self.unit_tests_manager is not None
+        ):
             self.unit_tests_manager.stop()
             if event.status == Status.ERROR:
                 message = self.unit_tests_manager.get_completion_message("🚫")
             else:
                 message = self.unit_tests_manager.get_completion_message()
             self.console.print(Padding(Text(message, style="white"), BLOCK_PADDING))
-
-            self.console.print()
+            self.console.print()
             self.unit_tests_manager = None

     def _on_scenario_started(self, event: events.ScenarioStarted) -> None:
-        if event.phase
+        if event.phase in [PhaseName.EXAMPLES, PhaseName.COVERAGE, PhaseName.FUZZING]:
             # We should display execution result + percentage in the end. For example:
             assert event.label is not None
             assert self.unit_tests_manager is not None
             self.unit_tests_manager.start_operation(event.label)

     def _on_scenario_finished(self, event: events.ScenarioFinished) -> None:
-        if event.phase
+        if event.phase in [PhaseName.EXAMPLES, PhaseName.COVERAGE, PhaseName.FUZZING]:
             assert self.unit_tests_manager is not None
             if event.label:
                 self.unit_tests_manager.finish_operation(event.label)
@@ -1050,9 +1040,39 @@ class OutputHandler(EventHandler):
             self.stateful_tests_manager.update(links_seen, event.status)

     def _check_warnings(self, event: events.ScenarioFinished) -> None:
+        statistic = aggregate_status_codes(event.recorder.interactions.values())
+
+        if statistic.total == 0:
+            return
+
         for status_code in (401, 403):
-            if
-                self.warnings.missing_auth.setdefault(status_code,
+            if statistic.ratio_for(status_code) >= TOO_MANY_RESPONSES_THRESHOLD:
+                self.warnings.missing_auth.setdefault(status_code, set()).add(event.recorder.label)
+
+        # Warn if all positive test cases got 4xx in return and no failure was found
+        def all_positive_are_rejected(recorder: ScenarioRecorder) -> bool:
+            seen_positive = False
+            for case in recorder.cases.values():
+                if not (case.value.meta is not None and case.value.meta.generation.mode == GenerationMode.POSITIVE):
+                    continue
+                seen_positive = True
+                interaction = recorder.interactions.get(case.value.id)
+                if not (interaction is not None and interaction.response is not None):
+                    continue
+                # At least one positive response for positive test case
+                if 200 <= interaction.response.status_code < 300:
+                    return False
+            # If there are positive test cases, and we ended up here, then there are no 2xx responses for them
+            # Otherwise, there are no positive test cases at all and this check should pass
+            return seen_positive
+
+        if (
+            event.status == Status.SUCCESS
+            and GenerationMode.POSITIVE in self.engine_config.execution.generation.modes
+            and all_positive_are_rejected(event.recorder)
+            and statistic.should_warn_about_only_4xx()
+        ):
+            self.warnings.only_4xx_responses.add(event.recorder.label)

     def _on_interrupted(self, event: events.Interrupted) -> None:
         from rich.padding import Padding
@@ -1122,36 +1142,58 @@ class OutputHandler(EventHandler):

     def display_warnings(self) -> None:
         display_section_name("WARNINGS")
-
-
-
-
-
-
+        click.echo()
+        if self.warnings.missing_auth:
+            total = sum(len(endpoints) for endpoints in self.warnings.missing_auth.values())
+            suffix = "" if total == 1 else "s"
+            click.echo(
+                _style(
+                    f"Missing or invalid API credentials: {total} API operation{suffix} returned authentication errors\n",
+                    fg="yellow",
+                )
             )
-            )

-
-
-
+            for status_code, operations in self.warnings.missing_auth.items():
+                status_text = "Unauthorized" if status_code == 401 else "Forbidden"
+                count = len(operations)
+                suffix = "" if count == 1 else "s"
+                click.echo(
+                    _style(
+                        f"{status_code} {status_text} ({count} operation{suffix}):",
+                        fg="yellow",
+                    )
+                )
+                # Show first few API operations
+                for endpoint in sorted(operations)[:3]:
+                    click.echo(_style(f" - {endpoint}", fg="yellow"))
+                if len(operations) > 3:
+                    click.echo(_style(f" + {len(operations) - 3} more", fg="yellow"))
+                click.echo()
+            click.echo(_style("Tip: ", bold=True, fg="yellow"), nl=False)
+            click.echo(_style(f"Use {bold('--auth')} ", fg="yellow"), nl=False)
+            click.echo(_style(f"or {bold('-H')} ", fg="yellow"), nl=False)
+            click.echo(_style("to provide authentication credentials", fg="yellow"))
+            click.echo()
+
+        if self.warnings.only_4xx_responses:
+            count = len(self.warnings.only_4xx_responses)
             suffix = "" if count == 1 else "s"
             click.echo(
                 _style(
-                    f"
+                    f"Schemathesis configuration: {count} operation{suffix} returned only 4xx responses during unit tests\n",
                     fg="yellow",
                 )
             )
-
-            for endpoint in
+
+            for endpoint in sorted(self.warnings.only_4xx_responses)[:3]:
                 click.echo(_style(f" - {endpoint}", fg="yellow"))
-            if len(
-                click.echo(_style(f" + {len(
+            if len(self.warnings.only_4xx_responses) > 3:
+                click.echo(_style(f" + {len(self.warnings.only_4xx_responses) - 3} more", fg="yellow"))
+            click.echo()
+
+            click.echo(_style("Tip: ", bold=True, fg="yellow"), nl=False)
+            click.echo(_style("Check base URL or adjust data generation settings", fg="yellow"))
             click.echo()
-        click.echo(_style("Tip: ", bold=True, fg="yellow"), nl=False)
-        click.echo(_style(f"Use {bold('--auth')} ", fg="yellow"), nl=False)
-        click.echo(_style(f"or {bold('-H')} ", fg="yellow"), nl=False)
-        click.echo(_style("to provide authentication credentials", fg="yellow"))
-        click.echo()

     def display_experiments(self) -> None:
         display_section_name("EXPERIMENTS")
@@ -1235,7 +1277,7 @@ class OutputHandler(EventHandler):
             err.label
             for err in self.errors
             # Some API operations may have some tests before they have an error
-            if err.phase
+            if err.phase in [PhaseName.EXAMPLES, PhaseName.COVERAGE, PhaseName.FUZZING]
             and err.label not in ctx.statistic.tested_operations
             and err.related_to_operation
         }
@@ -1258,7 +1300,7 @@ class OutputHandler(EventHandler):
             status, skip_reason = self.phases[phase]

             if status == Status.SKIP:
-                click.echo(_style(f"
+                click.echo(_style(f" ⏭ {phase.value}", fg="yellow"), nl=False)
                 if skip_reason:
                     click.echo(_style(f" ({skip_reason.value})", fg="yellow"))
                 else:
@@ -1395,7 +1437,7 @@ class OutputHandler(EventHandler):
         assert self.stateful_tests_manager is None
         if self.errors:
             display_section_name("ERRORS")
-            errors = sorted(self.errors, key=lambda r: (r.phase.value, r.label))
+            errors = sorted(self.errors, key=lambda r: (r.phase.value, r.label, r.info.title))
             for error in errors:
                 display_section_name(error.label, "_", fg="red")
                 click.echo(error.info.format(bold=lambda x: click.style(x, bold=True)))
@@ -1406,7 +1448,7 @@ class OutputHandler(EventHandler):
                 )
             )
         display_failures(ctx)
-        if self.warnings.missing_auth:
+        if self.warnings.missing_auth or self.warnings.only_4xx_responses:
             self.display_warnings()
         if GLOBAL_EXPERIMENTS.enabled:
             self.display_experiments()
@@ -1426,10 +1468,21 @@ class OutputHandler(EventHandler):
         if self.errors:
             self.display_errors_summary()

-        if self.warnings.missing_auth:
-            affected = sum(len(operations) for operations in self.warnings.missing_auth.values())
+        if self.warnings.missing_auth or self.warnings.only_4xx_responses:
             click.echo(_style("Warnings:", bold=True))
-
+
+            if self.warnings.missing_auth:
+                affected = sum(len(operations) for operations in self.warnings.missing_auth.values())
+                click.echo(_style(f" ⚠️ Missing authentication: {bold(str(affected))}", fg="yellow"))
+
+            if self.warnings.only_4xx_responses:
+                count = len(self.warnings.only_4xx_responses)
+                suffix = "" if count == 1 else "s"
+                click.echo(
+                    _style(f" ⚠️ Schemathesis configuration: {bold(str(count))}", fg="yellow"),
+                    nl=False,
+                )
+                click.echo(_style(f" operation{suffix} returned only 4xx responses during unit tests", fg="yellow"))
             click.echo()

         if ctx.summary_lines:
@@ -1448,14 +1501,41 @@ TOO_MANY_RESPONSES_WARNING_TEMPLATE = (
 TOO_MANY_RESPONSES_THRESHOLD = 0.9


-
-
+@dataclass
+class StatusCodeStatistic:
+    """Statistics about HTTP status codes in a scenario."""
+
+    counts: dict[int, int]
+    total: int
+
+    __slots__ = ("counts", "total")
+
+    def ratio_for(self, status_code: int) -> float:
+        """Calculate the ratio of responses with the given status code."""
+        if self.total == 0:
+            return 0.0
+        return self.counts.get(status_code, 0) / self.total
+
+    def should_warn_about_only_4xx(self) -> bool:
+        """Check if an operation should be warned about (only 4xx responses, excluding auth)."""
+        if self.total == 0:
+            return False
+        # Don't duplicate auth warnings
+        if set(self.counts.keys()) <= {401, 403}:
+            return False
+        # At this point we know we only have 4xx responses
+        return True
+
+
+def aggregate_status_codes(interactions: Iterable[Interaction]) -> StatusCodeStatistic:
+    """Analyze status codes from interactions."""
+    counts: dict[int, int] = {}
     total = 0
+
     for interaction in interactions:
         if interaction.response is not None:
-
-
+            status = interaction.response.status_code
+            counts[status] = counts.get(status, 0) + 1
             total += 1
-
-
-    return matched / total >= TOO_MANY_RESPONSES_THRESHOLD
+
+    return StatusCodeStatistic(counts=counts, total=total)
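For orientation only, a minimal sketch of how the warning helpers added above behave; it assumes they remain importable from the internal `schemathesis.cli.commands.run.handlers.output` module listed in this diff (internal paths may change between alpha releases):

```python
# Sketch based solely on the code added in this diff; not an official API example.
from schemathesis.cli.commands.run.handlers.output import (
    TOO_MANY_RESPONSES_THRESHOLD,
    StatusCodeStatistic,
)

# 9 of 10 responses were 401, so the 401 ratio reaches the 0.9 threshold and the
# operation would be recorded under the "missing authentication" warning.
stats = StatusCodeStatistic(counts={401: 9, 200: 1}, total=10)
assert stats.ratio_for(401) >= TOO_MANY_RESPONSES_THRESHOLD

# Non-auth 4xx-only results qualify for the new "only 4xx responses" warning,
# while pure 401/403 results are left to the missing-auth warning instead.
assert StatusCodeStatistic(counts={404: 3, 422: 2}, total=5).should_warn_about_only_4xx()
assert not StatusCodeStatistic(counts={401: 5}, total=5).should_warn_about_only_4xx()
```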
schemathesis/cli/commands/run/hypothesis.py
CHANGED
@@ -3,41 +3,15 @@ from __future__ import annotations
 from enum import Enum, unique
 from typing import TYPE_CHECKING, Any

-import click
-
 if TYPE_CHECKING:
     import hypothesis

-PHASES_INVALID_USAGE_MESSAGE = "Can't use `--hypothesis-phases` and `--hypothesis-no-phases` simultaneously"
 HYPOTHESIS_IN_MEMORY_DATABASE_IDENTIFIER = ":memory:"

 # Importing Hypothesis is expensive, hence we re-create the enums we need in CLI commands definitions
 # Hypothesis is stable, hence it should not be a problem and adding new variants should not be automatic


-@unique
-class Phase(str, Enum):
-    explicit = "explicit"  #: controls whether explicit examples are run.
-    reuse = "reuse"  #: controls whether previous examples will be reused.
-    generate = "generate"  #: controls whether new examples will be generated.
-    target = "target"  #: controls whether examples will be mutated for targeting.
-    # The `explain` phase is not supported
-
-    def as_hypothesis(self) -> hypothesis.Phase:
-        from hypothesis import Phase
-
-        return Phase[self.name]
-
-    @staticmethod
-    def filter_from_all(variants: list[Phase], no_shrink: bool) -> list[hypothesis.Phase]:
-        from hypothesis import Phase
-
-        phases = set(Phase) - {Phase.explain} - set(variants)
-        if no_shrink:
-            return list(phases - {Phase.shrink})
-        return list(phases)
-
-
 @unique
 class HealthCheck(str, Enum):
     # We remove not relevant checks
@@ -65,25 +39,13 @@ def prepare_health_checks(
     return [entry for health_check in hypothesis_suppress_health_check for entry in health_check.as_hypothesis()]


-def prepare_phases(
-
-
-
-
-
-
-    if hypothesis_phases is not None and hypothesis_no_phases is not None:
-        raise click.UsageError(PHASES_INVALID_USAGE_MESSAGE)
-    if hypothesis_phases:
-        phases = [phase.as_hypothesis() for phase in hypothesis_phases]
-        if not no_shrink:
-            phases.append(HypothesisPhase.shrink)
-        return phases
-    elif hypothesis_no_phases:
-        return Phase.filter_from_all(hypothesis_no_phases, no_shrink)
-    elif no_shrink:
-        return Phase.filter_from_all([], no_shrink)
-    return None
+def prepare_phases(no_shrink: bool = False) -> list[hypothesis.Phase] | None:
+    from hypothesis import Phase
+
+    phases = set(Phase) - {Phase.explain}
+    if no_shrink:
+        return list(phases - {Phase.shrink})
+    return list(phases)


 def prepare_settings(
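The CLI-level `Phase` enum and the `--hypothesis-phases`/`--hypothesis-no-phases` validation are removed; `prepare_phases` now only decides whether shrinking is kept. A hedged usage sketch, assuming the module path from the file list above stays valid:

```python
from hypothesis import Phase

from schemathesis.cli.commands.run.hypothesis import prepare_phases

# Every Hypothesis phase except `explain` is enabled by default.
assert set(prepare_phases()) == set(Phase) - {Phase.explain}

# Passing no_shrink=True additionally drops the shrinking phase.
assert Phase.shrink not in prepare_phases(no_shrink=True)
```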
schemathesis/core/__init__.py
CHANGED
@@ -16,6 +16,8 @@ class SpecificationFeature(str, enum.Enum):
     """Features that Schemathesis can provide for different specifications."""

     STATEFUL_TESTING = "stateful_testing"
+    COVERAGE = "coverage_tests"
+    EXAMPLES = "example_tests"


 @dataclass
@@ -39,7 +41,11 @@ class Specification:
     def supports_feature(self, feature: SpecificationFeature) -> bool:
         """Check if Schemathesis supports a given feature for this specification."""
         if self.kind == SpecificationKind.OPENAPI:
-            return feature in {
+            return feature in {
+                SpecificationFeature.STATEFUL_TESTING,
+                SpecificationFeature.COVERAGE,
+                SpecificationFeature.EXAMPLES,
+            }
         return False

schemathesis/engine/config.py
CHANGED
@@ -25,14 +25,14 @@ def _default_hypothesis_settings() -> hypothesis.settings:
 class ExecutionConfig:
     """Configuration for test execution."""

-    phases: list[PhaseName] = field(default_factory=
+    phases: list[PhaseName] = field(default_factory=PhaseName.defaults)
     checks: list[CheckFunction] = field(default_factory=lambda: [not_a_server_error])
     targets: list[TargetFunction] = field(default_factory=list)
     hypothesis_settings: hypothesis.settings = field(default_factory=_default_hypothesis_settings)
     generation: GenerationConfig = field(default_factory=GenerationConfig)
     max_failures: int | None = None
     unique_inputs: bool = False
-
+    continue_on_failure: bool = False
     seed: int | None = None
     workers_num: int = 1

schemathesis/engine/core.py
CHANGED
@@ -34,7 +34,17 @@ class Engine:
         """Create execution plan based on configuration."""
         phases = [
             self.get_phase_config(PhaseName.PROBING, is_supported=True, requires_links=False),
-            self.get_phase_config(
+            self.get_phase_config(
+                PhaseName.EXAMPLES,
+                is_supported=self.schema.specification.supports_feature(SpecificationFeature.EXAMPLES),
+                requires_links=False,
+            ),
+            self.get_phase_config(
+                PhaseName.COVERAGE,
+                is_supported=self.schema.specification.supports_feature(SpecificationFeature.COVERAGE),
+                requires_links=False,
+            ),
+            self.get_phase_config(PhaseName.FUZZING, is_supported=True, requires_links=False),
             self.get_phase_config(
                 PhaseName.STATEFUL_TESTING,
                 is_supported=self.schema.specification.supports_feature(SpecificationFeature.STATEFUL_TESTING),
schemathesis/engine/events.py
CHANGED
@@ -209,6 +209,13 @@ class NonFatalError(EngineEvent):
         self.label = label
         self.related_to_operation = related_to_operation

+    def __eq__(self, other: object) -> bool:
+        assert isinstance(other, NonFatalError)
+        return self.label == other.label and type(self.value) is type(other.value)
+
+    def __hash__(self) -> int:
+        return hash((self.label, type(self.value)))
+

 @dataclass
 class FatalError(EngineEvent):
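The added `__eq__`/`__hash__` let the output handler store `NonFatalError` events in a `set`, so repeated errors of the same kind for the same operation collapse into one entry. The stand-in class below only mirrors those two methods to illustrate the effect; it is not the real `NonFatalError` constructor, which this diff does not show:

```python
class ErrorLike:
    """Hypothetical stand-in: equality by label plus the type of the underlying exception."""

    def __init__(self, label: str, value: Exception) -> None:
        self.label = label
        self.value = value

    def __eq__(self, other: object) -> bool:
        assert isinstance(other, ErrorLike)
        return self.label == other.label and type(self.value) is type(other.value)

    def __hash__(self) -> int:
        return hash((self.label, type(self.value)))


errors = {
    ErrorLike("GET /users", ValueError("boom")),
    ErrorLike("GET /users", ValueError("same type, different message")),
    ErrorLike("GET /users", TypeError("boom")),
}
assert len(errors) == 2  # duplicates with the same label and exception type collapse
```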
schemathesis/engine/phases/__init__.py
CHANGED
@@ -14,14 +14,22 @@ class PhaseName(enum.Enum):
     """Available execution phases."""

     PROBING = "API probing"
-
-
+    EXAMPLES = "Examples"
+    COVERAGE = "Coverage"
+    FUZZING = "Fuzzing"
+    STATEFUL_TESTING = "Stateful"
+
+    @classmethod
+    def defaults(cls) -> list[PhaseName]:
+        return [PhaseName.EXAMPLES, PhaseName.COVERAGE, PhaseName.FUZZING, PhaseName.STATEFUL_TESTING]

     @classmethod
     def from_str(cls, value: str) -> PhaseName:
         return {
             "probing": cls.PROBING,
-            "
+            "examples": cls.EXAMPLES,
+            "coverage": cls.COVERAGE,
+            "fuzzing": cls.FUZZING,
             "stateful": cls.STATEFUL_TESTING,
         }[value.lower()]

@@ -60,7 +68,11 @@ def execute(ctx: EngineContext, phase: Phase) -> EventGenerator:

     if phase.name == PhaseName.PROBING:
         yield from probes.execute(ctx, phase)
-    elif phase.name == PhaseName.
+    elif phase.name == PhaseName.EXAMPLES:
+        yield from unit.execute(ctx, phase)
+    elif phase.name == PhaseName.COVERAGE:
+        yield from unit.execute(ctx, phase)
+    elif phase.name == PhaseName.FUZZING:
         yield from unit.execute(ctx, phase)
     elif phase.name == PhaseName.STATEFUL_TESTING:
         yield from stateful.execute(ctx, phase)
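Based only on the code added above, the former single unit-testing phase is now reported as three phases (examples, coverage, fuzzing), all dispatched to the same unit-test executor. A small sketch of the enum helpers (the `schemathesis.engine.phases` import path is taken from the context lines in this diff):

```python
from schemathesis.engine.phases import PhaseName

# Default phase order used by ExecutionConfig via PhaseName.defaults().
assert PhaseName.defaults() == [
    PhaseName.EXAMPLES,
    PhaseName.COVERAGE,
    PhaseName.FUZZING,
    PhaseName.STATEFUL_TESTING,
]

# CLI-style lookup is case-insensitive because from_str lowercases its input.
assert PhaseName.from_str("coverage") is PhaseName.COVERAGE
assert PhaseName.from_str("Fuzzing") is PhaseName.FUZZING
```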