schemathesis 4.0.0a4__py3-none-any.whl → 4.0.0a5__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.
@@ -15,7 +15,6 @@ from schemathesis.cli.commands.run.filters import FilterArguments, with_filters
 from schemathesis.cli.commands.run.hypothesis import (
     HYPOTHESIS_IN_MEMORY_DATABASE_IDENTIFIER,
     HealthCheck,
-    Phase,
     prepare_health_checks,
     prepare_phases,
     prepare_settings,
@@ -44,7 +43,7 @@ from schemathesis.specs.openapi.checks import * # noqa: F401, F403
 
 COLOR_OPTIONS_INVALID_USAGE_MESSAGE = "Can't use `--no-color` and `--force-color` simultaneously"
 
-DEFAULT_PHASES = ("unit", "stateful")
+DEFAULT_PHASES = ("examples", "coverage", "fuzzing", "stateful")
 
 
 @click.argument("schema", type=str)  # type: ignore[misc]
@@ -76,7 +75,7 @@ DEFAULT_PHASES = ("unit", "stateful")
 @grouped_option(
     "--phases",
     help="A comma-separated list of test phases to run",
-    type=CsvChoice(["unit", "stateful"]),
+    type=CsvChoice(DEFAULT_PHASES),
     default=",".join(DEFAULT_PHASES),
     metavar="",
 )
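
For illustration, a minimal sketch of how a comma-separated --phases value maps onto the new defaults shown above. The parse_phases helper and its validation logic are hypothetical stand-ins, not the CLI's actual CsvChoice implementation; only the phase names and the ",".join(DEFAULT_PHASES) default come from the diff.

DEFAULT_PHASES = ("examples", "coverage", "fuzzing", "stateful")


def parse_phases(raw: str | None) -> tuple[str, ...]:
    """Split a comma-separated --phases value and reject unknown phase names."""
    value = raw or ",".join(DEFAULT_PHASES)  # mirrors the option's default above
    selected = tuple(part.strip() for part in value.split(",") if part.strip())
    unknown = [phase for phase in selected if phase not in DEFAULT_PHASES]
    if unknown:
        raise ValueError(f"Unknown phases: {', '.join(unknown)}")
    return selected


assert parse_phases(None) == DEFAULT_PHASES
assert parse_phases("coverage,fuzzing") == ("coverage", "fuzzing")
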
@@ -117,15 +116,6 @@ DEFAULT_PHASES = ("unit", "stateful")
     show_default=True,
     metavar="",
 )
-@grouped_option(
-    "-x",
-    "--exitfirst",
-    "exit_first",
-    is_flag=True,
-    default=False,
-    help="Terminate the test suite immediately upon the first failure or error encountered",
-    show_default=True,
-)
 @grouped_option(
     "--max-failures",
     "max_failures",
@@ -133,6 +123,14 @@ DEFAULT_PHASES = ("unit", "stateful")
     help="Terminate the test suite after reaching a specified number of failures or errors",
     show_default=True,
 )
+@grouped_option(
+    "--continue-on-failure",
+    "continue_on_failure",
+    help="Continue executing all test cases within a scenario, even after encountering failures",
+    is_flag=True,
+    default=False,
+    metavar="",
+)
 @grouped_option(
     "--max-response-time",
     help="Maximum allowed API response time in seconds",
@@ -307,15 +305,6 @@ DEFAULT_PHASES = ("unit", "stateful")
     multiple=True,
     metavar="FEATURES",
 )
-@grouped_option(
-    "--experimental-no-failfast",
-    "no_failfast",
-    help="Continue testing an API operation after a failure is found",
-    is_flag=True,
-    default=False,
-    metavar="",
-    envvar="SCHEMATHESIS_EXPERIMENTAL_NO_FAILFAST",
-)
 @grouped_option(
     "--experimental-missing-required-header-allowed-statuses",
     "missing_required_header_allowed_statuses",
@@ -407,7 +396,7 @@ DEFAULT_PHASES = ("unit", "stateful")
     default=None,
     callback=validation.reduce_list,
     show_default=True,
-    metavar="TARGET",
+    metavar="METRIC",
 )
 @grouped_option(
     "--generation-with-security-parameters",
@@ -486,19 +475,6 @@ DEFAULT_PHASES = ("unit", "stateful")
     type=str,
     callback=validation.validate_set_path,
 )
-@group("Hypothesis engine options")
-@grouped_option(
-    "--hypothesis-phases",
-    help="Testing phases to execute",
-    type=CsvEnumChoice(Phase),
-    metavar="",
-)
-@grouped_option(
-    "--hypothesis-no-phases",
-    help="Testing phases to exclude from execution",
-    type=CsvEnumChoice(Phase),
-    metavar="",
-)
 @group("Global options")
 @grouped_option("--no-color", help="Disable ANSI color escape codes", type=bool, is_flag=True)
 @grouped_option("--force-color", help="Explicitly tells to enable ANSI color escape codes", type=bool, is_flag=True)
@@ -513,7 +489,6 @@ def run(
     set_cookie: dict[str, str],
     set_path: dict[str, str],
     experiments: list,
-    no_failfast: bool,
     missing_required_header_allowed_statuses: list[str],
     positive_data_acceptance_allowed_statuses: list[str],
     negative_data_rejection_allowed_statuses: list[str],
@@ -521,8 +496,8 @@ def run(
     excluded_check_names: Sequence[str],
     max_response_time: float | None = None,
     phases: Sequence[str] = DEFAULT_PHASES,
-    exit_first: bool = False,
     max_failures: int | None = None,
+    continue_on_failure: bool = False,
     include_path: Sequence[str] = (),
     include_path_regex: str | None = None,
     include_method: Sequence[str] = (),
@@ -565,8 +540,6 @@ def run(
     output_sanitize: bool = True,
     output_truncate: bool = True,
     contrib_openapi_fill_missing_examples: bool = False,
-    hypothesis_phases: list[Phase] | None = None,
-    hypothesis_no_phases: list[Phase] | None = None,
     generation_modes: tuple[GenerationMode, ...] = DEFAULT_GENERATOR_MODES,
     generation_seed: int | None = None,
     generation_max_examples: int | None = None,
@@ -593,7 +566,7 @@ def run(
 
     validation.validate_schema(schema, base_url)
 
-    _hypothesis_phases = prepare_phases(hypothesis_phases, hypothesis_no_phases, generation_no_shrink)
+    _hypothesis_phases = prepare_phases(generation_no_shrink)
     _hypothesis_suppress_health_check = prepare_health_checks(suppress_health_check)
 
     for experiment in experiments:
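
For context, a hypothetical stand-in for the narrowed prepare_phases call above: with the --hypothesis-phases options removed, a single no-shrink flag is enough to derive Hypothesis phase settings. The sketch uses Hypothesis's public Phase enum and is not the actual prepare_phases implementation.

from hypothesis import Phase


def prepare_phases_sketch(no_shrink: bool) -> tuple[Phase, ...] | None:
    """Return explicit phases only when shrinking must be disabled."""
    if no_shrink:
        # Keep every phase except shrinking
        return tuple(phase for phase in Phase if phase is not Phase.shrink)
    # None lets Hypothesis apply its default phases
    return None


print(prepare_phases_sketch(True))
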
@@ -640,9 +613,6 @@ def run(
         max_response_time=max_response_time,
     ).into()
 
-    if exit_first and max_failures is None:
-        max_failures = 1
-
     report_config = None
     if report_formats or report_junit_path or report_vcr_path or report_har_path:
         report_config = ReportConfig(
@@ -687,7 +657,7 @@ def run(
             with_security_parameters=generation_with_security_parameters,
         ),
         max_failures=max_failures,
-        no_failfast=no_failfast,
+        continue_on_failure=continue_on_failure,
         unique_inputs=generation_unique_inputs,
         seed=seed,
         workers_num=workers_num,
@@ -69,10 +69,9 @@ class CheckArguments:
             checks_config[_max_response_time] = MaxResponseTimeConfig(self.max_response_time)
             selected_checks.append(_max_response_time)
 
-        if experimental.COVERAGE_PHASE.is_enabled:
-            from schemathesis.specs.openapi.checks import unsupported_method
+        from schemathesis.specs.openapi.checks import unsupported_method
 
-            selected_checks.append(unsupported_method)
+        selected_checks.append(unsupported_method)
 
         # Exclude checks based on their names
         selected_checks = [check for check in selected_checks if check.__name__ not in self.excluded_check_names]
@@ -20,8 +20,9 @@ if TYPE_CHECKING:
 class Statistic:
     """Running statistics about test execution."""
 
-    outcomes: dict[Status, int]
     failures: dict[str, dict[str, GroupedFailures]]
+    # Track first case_id where each unique failure was found
+    unique_failures_map: dict[Failure, str]
 
     extraction_failures: set[ExtractionFailure]
 
@@ -32,8 +33,8 @@ class Statistic:
     cases_without_checks: int
 
     __slots__ = (
-        "outcomes",
         "failures",
+        "unique_failures_map",
         "extraction_failures",
         "tested_operations",
         "total_cases",
@@ -42,15 +43,15 @@ class Statistic:
     )
 
     def __init__(self) -> None:
-        self.outcomes = {}
         self.failures = {}
+        self.unique_failures_map = {}
         self.extraction_failures = set()
         self.tested_operations = set()
         self.total_cases = 0
         self.cases_with_failures = 0
         self.cases_without_checks = 0
 
-    def record_checks(self, recorder: ScenarioRecorder) -> None:
+    def on_scenario_finished(self, recorder: ScenarioRecorder) -> None:
         """Update statistics and store failures from a new batch of checks."""
         from schemathesis.generation.stateful.state_machine import ExtractionFailure
 
@@ -80,20 +81,32 @@ class Statistic:
 
             self.tested_operations.add(case.value.operation.label)
             has_failures = False
-            for check in checks:
-                response = recorder.interactions[case_id].response
+            current_case_failures = []
+            last_failure_info = None
 
-                # Collect failures
+            for check in checks:
                 if check.failure_info is not None:
-                    has_failures = True
-                    if case_id not in failures:
-                        failures[case_id] = GroupedFailures(
-                            case_id=case_id,
-                            code_sample=check.failure_info.code_sample,
-                            failures=[],
-                            response=response,
-                        )
-                    failures[case_id].failures.append(check.failure_info.failure)
+                    failure = check.failure_info.failure
+
+                    # Check if this is a new unique failure
+                    if failure not in self.unique_failures_map:
+                        last_failure_info = check.failure_info
+                        self.unique_failures_map[failure] = case_id
+                        current_case_failures.append(failure)
+                        has_failures = True
+                    else:
+                        # This failure was already seen - skip it
+                        continue
+
+            if current_case_failures:
+                assert last_failure_info is not None
+                failures[case_id] = GroupedFailures(
+                    case_id=case_id,
+                    code_sample=last_failure_info.code_sample,
+                    failures=current_case_failures,
+                    response=recorder.interactions[case_id].response,
+                )
+
             if has_failures:
                 self.cases_with_failures += 1
 
@@ -178,7 +191,7 @@ class ExecutionContext:
 
     def on_event(self, event: events.EngineEvent) -> None:
         if isinstance(event, events.ScenarioFinished):
-            self.statistic.record_checks(event.recorder)
+            self.statistic.on_scenario_finished(event.recorder)
         elif isinstance(event, events.NonFatalError) or (
             isinstance(event, events.PhaseFinished)
             and event.phase.is_enabled
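
As a simplified, self-contained sketch of the bookkeeping introduced in the Statistic changes above: each unique failure is recorded only for the first case that produced it, so the same failure found again in a later case is not reported twice. The Failure and FailureStats types below are stand-ins for illustration, not the schemathesis classes.

from dataclasses import dataclass, field


@dataclass(frozen=True)
class Failure:  # hashable stand-in for schemathesis failure objects
    operation: str
    title: str


@dataclass
class FailureStats:
    # Maps each unique failure to the first case_id that triggered it
    unique_failures_map: dict[Failure, str] = field(default_factory=dict)
    # Failures grouped by the case that first produced them
    failures_by_case: dict[str, list[Failure]] = field(default_factory=dict)

    def record(self, case_id: str, failures: list[Failure]) -> None:
        new = [f for f in failures if f not in self.unique_failures_map]
        for failure in new:
            self.unique_failures_map[failure] = case_id
        if new:
            self.failures_by_case[case_id] = new


stats = FailureStats()
stats.record("case-1", [Failure("GET /users", "Server error")])
stats.record("case-2", [Failure("GET /users", "Server error")])  # duplicate, ignored
assert list(stats.failures_by_case) == ["case-1"]
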
@@ -125,6 +125,7 @@ def initialize_handlers(config: RunConfig) -> list[EventHandler]:
             seed=config.engine.execution.seed,
             rate_limit=config.rate_limit,
             wait_for_schema=config.wait_for_schema,
+            engine_config=config.engine,
             report_config=config.report,
         )
     )