schemathesis 3.39.15__py3-none-any.whl → 4.0.0__py3-none-any.whl
This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in that registry.
- schemathesis/__init__.py +41 -79
- schemathesis/auths.py +111 -122
- schemathesis/checks.py +169 -60
- schemathesis/cli/__init__.py +15 -2117
- schemathesis/cli/commands/__init__.py +85 -0
- schemathesis/cli/commands/data.py +10 -0
- schemathesis/cli/commands/run/__init__.py +590 -0
- schemathesis/cli/commands/run/context.py +204 -0
- schemathesis/cli/commands/run/events.py +60 -0
- schemathesis/cli/commands/run/executor.py +157 -0
- schemathesis/cli/commands/run/filters.py +53 -0
- schemathesis/cli/commands/run/handlers/__init__.py +46 -0
- schemathesis/cli/commands/run/handlers/base.py +18 -0
- schemathesis/cli/commands/run/handlers/cassettes.py +474 -0
- schemathesis/cli/commands/run/handlers/junitxml.py +55 -0
- schemathesis/cli/commands/run/handlers/output.py +1628 -0
- schemathesis/cli/commands/run/loaders.py +114 -0
- schemathesis/cli/commands/run/validation.py +246 -0
- schemathesis/cli/constants.py +5 -58
- schemathesis/cli/core.py +19 -0
- schemathesis/cli/ext/fs.py +16 -0
- schemathesis/cli/ext/groups.py +84 -0
- schemathesis/cli/{options.py → ext/options.py} +36 -34
- schemathesis/config/__init__.py +189 -0
- schemathesis/config/_auth.py +51 -0
- schemathesis/config/_checks.py +268 -0
- schemathesis/config/_diff_base.py +99 -0
- schemathesis/config/_env.py +21 -0
- schemathesis/config/_error.py +156 -0
- schemathesis/config/_generation.py +149 -0
- schemathesis/config/_health_check.py +24 -0
- schemathesis/config/_operations.py +327 -0
- schemathesis/config/_output.py +171 -0
- schemathesis/config/_parameters.py +19 -0
- schemathesis/config/_phases.py +187 -0
- schemathesis/config/_projects.py +527 -0
- schemathesis/config/_rate_limit.py +17 -0
- schemathesis/config/_report.py +120 -0
- schemathesis/config/_validator.py +9 -0
- schemathesis/config/_warnings.py +25 -0
- schemathesis/config/schema.json +885 -0
- schemathesis/core/__init__.py +67 -0
- schemathesis/core/compat.py +32 -0
- schemathesis/core/control.py +2 -0
- schemathesis/core/curl.py +58 -0
- schemathesis/core/deserialization.py +65 -0
- schemathesis/core/errors.py +459 -0
- schemathesis/core/failures.py +315 -0
- schemathesis/core/fs.py +19 -0
- schemathesis/core/hooks.py +20 -0
- schemathesis/core/loaders.py +104 -0
- schemathesis/core/marks.py +66 -0
- schemathesis/{transports/content_types.py → core/media_types.py} +14 -12
- schemathesis/core/output/__init__.py +46 -0
- schemathesis/core/output/sanitization.py +54 -0
- schemathesis/{throttling.py → core/rate_limit.py} +16 -17
- schemathesis/core/registries.py +31 -0
- schemathesis/core/transforms.py +113 -0
- schemathesis/core/transport.py +223 -0
- schemathesis/core/validation.py +54 -0
- schemathesis/core/version.py +7 -0
- schemathesis/engine/__init__.py +28 -0
- schemathesis/engine/context.py +118 -0
- schemathesis/engine/control.py +36 -0
- schemathesis/engine/core.py +169 -0
- schemathesis/engine/errors.py +464 -0
- schemathesis/engine/events.py +258 -0
- schemathesis/engine/phases/__init__.py +88 -0
- schemathesis/{runner → engine/phases}/probes.py +52 -68
- schemathesis/engine/phases/stateful/__init__.py +68 -0
- schemathesis/engine/phases/stateful/_executor.py +356 -0
- schemathesis/engine/phases/stateful/context.py +85 -0
- schemathesis/engine/phases/unit/__init__.py +212 -0
- schemathesis/engine/phases/unit/_executor.py +416 -0
- schemathesis/engine/phases/unit/_pool.py +82 -0
- schemathesis/engine/recorder.py +247 -0
- schemathesis/errors.py +43 -0
- schemathesis/filters.py +17 -98
- schemathesis/generation/__init__.py +5 -33
- schemathesis/generation/case.py +317 -0
- schemathesis/generation/coverage.py +282 -175
- schemathesis/generation/hypothesis/__init__.py +36 -0
- schemathesis/generation/hypothesis/builder.py +800 -0
- schemathesis/generation/{_hypothesis.py → hypothesis/examples.py} +2 -11
- schemathesis/generation/hypothesis/given.py +66 -0
- schemathesis/generation/hypothesis/reporting.py +14 -0
- schemathesis/generation/hypothesis/strategies.py +16 -0
- schemathesis/generation/meta.py +115 -0
- schemathesis/generation/metrics.py +93 -0
- schemathesis/generation/modes.py +20 -0
- schemathesis/generation/overrides.py +116 -0
- schemathesis/generation/stateful/__init__.py +37 -0
- schemathesis/generation/stateful/state_machine.py +278 -0
- schemathesis/graphql/__init__.py +15 -0
- schemathesis/graphql/checks.py +109 -0
- schemathesis/graphql/loaders.py +284 -0
- schemathesis/hooks.py +80 -101
- schemathesis/openapi/__init__.py +13 -0
- schemathesis/openapi/checks.py +455 -0
- schemathesis/openapi/generation/__init__.py +0 -0
- schemathesis/openapi/generation/filters.py +72 -0
- schemathesis/openapi/loaders.py +313 -0
- schemathesis/pytest/__init__.py +5 -0
- schemathesis/pytest/control_flow.py +7 -0
- schemathesis/pytest/lazy.py +281 -0
- schemathesis/pytest/loaders.py +36 -0
- schemathesis/{extra/pytest_plugin.py → pytest/plugin.py} +128 -108
- schemathesis/python/__init__.py +0 -0
- schemathesis/python/asgi.py +12 -0
- schemathesis/python/wsgi.py +12 -0
- schemathesis/schemas.py +537 -273
- schemathesis/specs/graphql/__init__.py +0 -1
- schemathesis/specs/graphql/_cache.py +1 -2
- schemathesis/specs/graphql/scalars.py +42 -6
- schemathesis/specs/graphql/schemas.py +141 -137
- schemathesis/specs/graphql/validation.py +11 -17
- schemathesis/specs/openapi/__init__.py +6 -1
- schemathesis/specs/openapi/_cache.py +1 -2
- schemathesis/specs/openapi/_hypothesis.py +142 -156
- schemathesis/specs/openapi/checks.py +368 -257
- schemathesis/specs/openapi/converter.py +4 -4
- schemathesis/specs/openapi/definitions.py +1 -1
- schemathesis/specs/openapi/examples.py +23 -21
- schemathesis/specs/openapi/expressions/__init__.py +31 -19
- schemathesis/specs/openapi/expressions/extractors.py +1 -4
- schemathesis/specs/openapi/expressions/lexer.py +1 -1
- schemathesis/specs/openapi/expressions/nodes.py +36 -41
- schemathesis/specs/openapi/expressions/parser.py +1 -1
- schemathesis/specs/openapi/formats.py +35 -7
- schemathesis/specs/openapi/media_types.py +53 -12
- schemathesis/specs/openapi/negative/__init__.py +7 -4
- schemathesis/specs/openapi/negative/mutations.py +6 -5
- schemathesis/specs/openapi/parameters.py +7 -10
- schemathesis/specs/openapi/patterns.py +94 -31
- schemathesis/specs/openapi/references.py +12 -53
- schemathesis/specs/openapi/schemas.py +238 -308
- schemathesis/specs/openapi/security.py +1 -1
- schemathesis/specs/openapi/serialization.py +12 -6
- schemathesis/specs/openapi/stateful/__init__.py +268 -133
- schemathesis/specs/openapi/stateful/control.py +87 -0
- schemathesis/specs/openapi/stateful/links.py +209 -0
- schemathesis/transport/__init__.py +142 -0
- schemathesis/transport/asgi.py +26 -0
- schemathesis/transport/prepare.py +124 -0
- schemathesis/transport/requests.py +244 -0
- schemathesis/{_xml.py → transport/serialization.py} +69 -11
- schemathesis/transport/wsgi.py +171 -0
- schemathesis-4.0.0.dist-info/METADATA +204 -0
- schemathesis-4.0.0.dist-info/RECORD +164 -0
- {schemathesis-3.39.15.dist-info → schemathesis-4.0.0.dist-info}/entry_points.txt +1 -1
- {schemathesis-3.39.15.dist-info → schemathesis-4.0.0.dist-info}/licenses/LICENSE +1 -1
- schemathesis/_compat.py +0 -74
- schemathesis/_dependency_versions.py +0 -19
- schemathesis/_hypothesis.py +0 -712
- schemathesis/_override.py +0 -50
- schemathesis/_patches.py +0 -21
- schemathesis/_rate_limiter.py +0 -7
- schemathesis/cli/callbacks.py +0 -466
- schemathesis/cli/cassettes.py +0 -561
- schemathesis/cli/context.py +0 -75
- schemathesis/cli/debug.py +0 -27
- schemathesis/cli/handlers.py +0 -19
- schemathesis/cli/junitxml.py +0 -124
- schemathesis/cli/output/__init__.py +0 -1
- schemathesis/cli/output/default.py +0 -920
- schemathesis/cli/output/short.py +0 -59
- schemathesis/cli/reporting.py +0 -79
- schemathesis/cli/sanitization.py +0 -26
- schemathesis/code_samples.py +0 -151
- schemathesis/constants.py +0 -54
- schemathesis/contrib/__init__.py +0 -11
- schemathesis/contrib/openapi/__init__.py +0 -11
- schemathesis/contrib/openapi/fill_missing_examples.py +0 -24
- schemathesis/contrib/openapi/formats/__init__.py +0 -9
- schemathesis/contrib/openapi/formats/uuid.py +0 -16
- schemathesis/contrib/unique_data.py +0 -41
- schemathesis/exceptions.py +0 -571
- schemathesis/experimental/__init__.py +0 -109
- schemathesis/extra/_aiohttp.py +0 -28
- schemathesis/extra/_flask.py +0 -13
- schemathesis/extra/_server.py +0 -18
- schemathesis/failures.py +0 -284
- schemathesis/fixups/__init__.py +0 -37
- schemathesis/fixups/fast_api.py +0 -41
- schemathesis/fixups/utf8_bom.py +0 -28
- schemathesis/generation/_methods.py +0 -44
- schemathesis/graphql.py +0 -3
- schemathesis/internal/__init__.py +0 -7
- schemathesis/internal/checks.py +0 -86
- schemathesis/internal/copy.py +0 -32
- schemathesis/internal/datetime.py +0 -5
- schemathesis/internal/deprecation.py +0 -37
- schemathesis/internal/diff.py +0 -15
- schemathesis/internal/extensions.py +0 -27
- schemathesis/internal/jsonschema.py +0 -36
- schemathesis/internal/output.py +0 -68
- schemathesis/internal/transformation.py +0 -26
- schemathesis/internal/validation.py +0 -34
- schemathesis/lazy.py +0 -474
- schemathesis/loaders.py +0 -122
- schemathesis/models.py +0 -1341
- schemathesis/parameters.py +0 -90
- schemathesis/runner/__init__.py +0 -605
- schemathesis/runner/events.py +0 -389
- schemathesis/runner/impl/__init__.py +0 -3
- schemathesis/runner/impl/context.py +0 -88
- schemathesis/runner/impl/core.py +0 -1280
- schemathesis/runner/impl/solo.py +0 -80
- schemathesis/runner/impl/threadpool.py +0 -391
- schemathesis/runner/serialization.py +0 -544
- schemathesis/sanitization.py +0 -252
- schemathesis/serializers.py +0 -328
- schemathesis/service/__init__.py +0 -18
- schemathesis/service/auth.py +0 -11
- schemathesis/service/ci.py +0 -202
- schemathesis/service/client.py +0 -133
- schemathesis/service/constants.py +0 -38
- schemathesis/service/events.py +0 -61
- schemathesis/service/extensions.py +0 -224
- schemathesis/service/hosts.py +0 -111
- schemathesis/service/metadata.py +0 -71
- schemathesis/service/models.py +0 -258
- schemathesis/service/report.py +0 -255
- schemathesis/service/serialization.py +0 -173
- schemathesis/service/usage.py +0 -66
- schemathesis/specs/graphql/loaders.py +0 -364
- schemathesis/specs/openapi/expressions/context.py +0 -16
- schemathesis/specs/openapi/links.py +0 -389
- schemathesis/specs/openapi/loaders.py +0 -707
- schemathesis/specs/openapi/stateful/statistic.py +0 -198
- schemathesis/specs/openapi/stateful/types.py +0 -14
- schemathesis/specs/openapi/validation.py +0 -26
- schemathesis/stateful/__init__.py +0 -147
- schemathesis/stateful/config.py +0 -97
- schemathesis/stateful/context.py +0 -135
- schemathesis/stateful/events.py +0 -274
- schemathesis/stateful/runner.py +0 -309
- schemathesis/stateful/sink.py +0 -68
- schemathesis/stateful/state_machine.py +0 -328
- schemathesis/stateful/statistic.py +0 -22
- schemathesis/stateful/validation.py +0 -100
- schemathesis/targets.py +0 -77
- schemathesis/transports/__init__.py +0 -369
- schemathesis/transports/asgi.py +0 -7
- schemathesis/transports/auth.py +0 -38
- schemathesis/transports/headers.py +0 -36
- schemathesis/transports/responses.py +0 -57
- schemathesis/types.py +0 -44
- schemathesis/utils.py +0 -164
- schemathesis-3.39.15.dist-info/METADATA +0 -293
- schemathesis-3.39.15.dist-info/RECORD +0 -160
- /schemathesis/{extra → cli/ext}/__init__.py +0 -0
- /schemathesis/{_lazy_import.py → core/lazy_import.py} +0 -0
- /schemathesis/{internal → core}/result.py +0 -0
- {schemathesis-3.39.15.dist-info → schemathesis-4.0.0.dist-info}/WHEEL +0 -0
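The file listing above shows the 4.0 restructuring: the flat 3.x modules (`loaders.py`, `models.py`, `runner/`, `service/`) are removed in favour of scoped packages such as `schemathesis/config/`, `schemathesis/engine/`, `schemathesis/openapi/loaders.py`, `schemathesis/graphql/loaders.py`, and `schemathesis/pytest/plugin.py`. A minimal usage sketch against that layout is shown below; the `openapi.from_url` loader and the `parametrize`/`call_and_validate` calls are assumptions inferred from the new module names, not something this diff demonstrates.

```python
# Hypothetical sketch of the reorganized 4.0 public API; names are inferred
# from the module layout above (schemathesis/openapi/loaders.py,
# schemathesis/pytest/plugin.py) and are not confirmed by this diff.
import schemathesis

# Spec-specific loaders now live under the `openapi` / `graphql` namespaces
schema = schemathesis.openapi.from_url("https://example.schemathesis.io/openapi.json")


@schema.parametrize()
def test_api(case):
    # Send the generated request and validate the response against the schema
    case.call_and_validate()
```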
schemathesis/cli/commands/run/handlers/output.py (new file)
@@ -0,0 +1,1628 @@
+from __future__ import annotations
+
+import os
+import textwrap
+import time
+from dataclasses import dataclass, field
+from itertools import groupby
+from json.decoder import JSONDecodeError
+from types import GeneratorType
+from typing import TYPE_CHECKING, Any, Generator, Iterable
+
+import click
+
+from schemathesis.cli.commands.run.context import ExecutionContext, GroupedFailures
+from schemathesis.cli.commands.run.events import LoadingFinished, LoadingStarted
+from schemathesis.cli.commands.run.handlers.base import EventHandler
+from schemathesis.cli.constants import ISSUE_TRACKER_URL
+from schemathesis.cli.core import get_terminal_width
+from schemathesis.config import ProjectConfig, ReportFormat, SchemathesisWarning
+from schemathesis.core.errors import LoaderError, LoaderErrorKind, format_exception, split_traceback
+from schemathesis.core.failures import MessageBlock, Severity, format_failures
+from schemathesis.core.output import prepare_response_payload
+from schemathesis.core.result import Err, Ok
+from schemathesis.core.version import SCHEMATHESIS_VERSION
+from schemathesis.engine import Status, events
+from schemathesis.engine.errors import EngineErrorInfo
+from schemathesis.engine.phases import PhaseName, PhaseSkipReason
+from schemathesis.engine.phases.probes import ProbeOutcome
+from schemathesis.engine.recorder import Interaction, ScenarioRecorder
+from schemathesis.generation.modes import GenerationMode
+from schemathesis.schemas import ApiStatistic
+
+if TYPE_CHECKING:
+    from rich.console import Console, Group
+    from rich.live import Live
+    from rich.progress import Progress, TaskID
+    from rich.text import Text
+
+    from schemathesis.generation.stateful.state_machine import ExtractionFailure
+
+IO_ENCODING = os.getenv("PYTHONIOENCODING", "utf-8")
+DISCORD_LINK = "https://discord.gg/R9ASRAmHnA"
+
+
+def display_section_name(title: str, separator: str = "=", **kwargs: Any) -> None:
+    """Print section name with separators in terminal with the given title nicely centered."""
+    message = f" {title} ".center(get_terminal_width(), separator)
+    kwargs.setdefault("bold", True)
+    click.echo(_style(message, **kwargs))
+
+
+def bold(option: str) -> str:
+    return click.style(option, bold=True)
+
+
+def display_failures(ctx: ExecutionContext) -> None:
+    """Display all failures in the test run."""
+    if not ctx.statistic.failures:
+        return
+
+    display_section_name("FAILURES")
+    for label, failures in ctx.statistic.failures.items():
+        display_failures_for_single_test(ctx, label, failures.values())
+
+
+if IO_ENCODING != "utf-8":
+    HEADER_SEPARATOR = "-"
+
+    def _style(text: str, **kwargs: Any) -> str:
+        text = text.encode(IO_ENCODING, errors="replace").decode("utf-8")
+        return click.style(text, **kwargs)
+
+else:
+    HEADER_SEPARATOR = "━"
+
+    def _style(text: str, **kwargs: Any) -> str:
+        return click.style(text, **kwargs)
+
+
+def failure_formatter(block: MessageBlock, content: str) -> str:
+    if block == MessageBlock.CASE_ID:
+        return _style(content, bold=True)
+    if block == MessageBlock.FAILURE:
+        return _style(content, fg="red", bold=True)
+    if block == MessageBlock.STATUS:
+        return _style(content, bold=True)
+    assert block == MessageBlock.CURL
+    return _style(content.replace("Reproduce with", bold("Reproduce with")))
+
+
+def display_failures_for_single_test(ctx: ExecutionContext, label: str, checks: Iterable[GroupedFailures]) -> None:
+    """Display a failure for a single method / path."""
+    display_section_name(label, "_", fg="red")
+    for idx, group in enumerate(checks, 1):
+        click.echo(
+            format_failures(
+                case_id=f"{idx}. Test Case ID: {group.case_id}",
+                response=group.response,
+                failures=group.failures,
+                curl=group.code_sample,
+                formatter=failure_formatter,
+                config=ctx.config.output,
+            )
+        )
+        click.echo()
+
+
+VERIFY_URL_SUGGESTION = "Verify that the URL points directly to the Open API schema or GraphQL endpoint"
+DISABLE_SSL_SUGGESTION = f"Bypass SSL verification with {bold('`--tls-verify=false`')}."
+LOADER_ERROR_SUGGESTIONS = {
+    # SSL-specific connection issue
+    LoaderErrorKind.CONNECTION_SSL: DISABLE_SSL_SUGGESTION,
+    # Other connection problems
+    LoaderErrorKind.CONNECTION_OTHER: f"Use {bold('`--wait-for-schema=NUM`')} to wait up to NUM seconds for schema availability.",
+    # Response issues
+    LoaderErrorKind.UNEXPECTED_CONTENT_TYPE: VERIFY_URL_SUGGESTION,
+    LoaderErrorKind.HTTP_FORBIDDEN: "Verify your API keys or authentication headers.",
+    LoaderErrorKind.HTTP_NOT_FOUND: VERIFY_URL_SUGGESTION,
+    # OpenAPI specification issues
+    LoaderErrorKind.OPEN_API_UNSPECIFIED_VERSION: "Include the version in the schema.",
+    # YAML specific issues
+    LoaderErrorKind.YAML_NUMERIC_STATUS_CODES: "Convert numeric status codes to strings.",
+    LoaderErrorKind.YAML_NON_STRING_KEYS: "Convert non-string keys to strings.",
+    # Unclassified
+    LoaderErrorKind.UNCLASSIFIED: f"If you suspect this is a Schemathesis issue and the schema is valid, please report it and include the schema if you can:\n\n {ISSUE_TRACKER_URL}",
+}
+
+
+def _display_extras(extras: list[str]) -> None:
+    if extras:
+        click.echo()
+        for extra in extras:
+            click.echo(_style(f" {extra}"))
+
+
+def display_header(version: str) -> None:
+    prefix = "v" if version != "dev" else ""
+    header = f"Schemathesis {prefix}{version}"
+    click.echo(_style(header, bold=True))
+    click.echo(_style(HEADER_SEPARATOR * len(header), bold=True))
+    click.echo()
+
+
+DEFAULT_INTERNAL_ERROR_MESSAGE = "An internal error occurred during the test run"
+TRUNCATION_PLACEHOLDER = "[...]"
+
+
+def _print_lines(lines: list[str | Generator[str, None, None]]) -> None:
+    for entry in lines:
+        if isinstance(entry, str):
+            click.echo(entry)
+        elif isinstance(entry, GeneratorType):
+            for line in entry:
+                click.echo(line)
+
+
+def _default_console() -> Console:
+    from rich.console import Console
+
+    kwargs = {}
+    # For stdout recording in tests
+    if "PYTEST_VERSION" in os.environ:
+        kwargs["width"] = 240
+    return Console(**kwargs)
+
+
+BLOCK_PADDING = (0, 1, 0, 1)
+
+
+@dataclass
+class LoadingProgressManager:
+    console: Console
+    location: str
+    start_time: float
+    progress: Progress
+    progress_task_id: TaskID | None
+    is_interrupted: bool
+
+    __slots__ = ("console", "location", "start_time", "progress", "progress_task_id", "is_interrupted")
+
+    def __init__(self, console: Console, location: str) -> None:
+        from rich.progress import Progress, RenderableColumn, SpinnerColumn, TextColumn
+        from rich.style import Style
+        from rich.text import Text
+
+        self.console = console
+        self.location = location
+        self.start_time = time.monotonic()
+        progress_message = Text.assemble(
+            ("Loading specification from ", Style(color="white")),
+            (location, Style(color="cyan")),
+        )
+        self.progress = Progress(
+            TextColumn(""),
+            SpinnerColumn("clock"),
+            RenderableColumn(progress_message),
+            console=console,
+            transient=True,
+        )
+        self.progress_task_id = None
+        self.is_interrupted = False
+
+    def start(self) -> None:
+        """Start loading progress display."""
+        self.progress_task_id = self.progress.add_task("Loading", total=None)
+        self.progress.start()
+
+    def stop(self) -> None:
+        """Stop loading progress display."""
+        assert self.progress_task_id is not None
+        self.progress.stop_task(self.progress_task_id)
+        self.progress.stop()
+
+    def interrupt(self) -> None:
+        """Handle interruption during loading."""
+        self.is_interrupted = True
+        self.stop()
+
+    def get_completion_message(self) -> Text:
+        """Generate completion message including duration."""
+        from rich.style import Style
+        from rich.text import Text
+
+        duration = format_duration(int((time.monotonic() - self.start_time) * 1000))
+        if self.is_interrupted:
+            return Text.assemble(
+                ("⚡ ", Style(color="yellow")),
+                (f"Loading interrupted after {duration} while loading from ", Style(color="white")),
+                (self.location, Style(color="cyan")),
+            )
+        return Text.assemble(
+            ("✅ ", Style(color="green")),
+            ("Loaded specification from ", Style(color="bright_white")),
+            (self.location, Style(color="cyan")),
+            (f" (in {duration})", Style(color="bright_white")),
+        )
+
+    def get_error_message(self, error: LoaderError) -> Group:
+        from rich.console import Group
+        from rich.style import Style
+        from rich.text import Text
+
+        duration = format_duration(int((time.monotonic() - self.start_time) * 1000))
+
+        # Show what was attempted
+        attempted = Text.assemble(
+            ("❌ ", Style(color="red")),
+            ("Failed to load specification from ", Style(color="white")),
+            (self.location, Style(color="cyan")),
+            (f" after {duration}", Style(color="white")),
+        )
+
+        # Show error details
+        error_title = Text("Schema Loading Error", style=Style(color="red", bold=True))
+        error_message = Text(error.message)
+
+        return Group(
+            attempted,
+            Text(),
+            error_title,
+            Text(),
+            error_message,
+        )
+
+
+@dataclass
+class ProbingProgressManager:
+    console: Console
+    start_time: float
+    progress: Progress
+    progress_task_id: TaskID | None
+    is_interrupted: bool
+
+    __slots__ = ("console", "start_time", "progress", "progress_task_id", "is_interrupted")
+
+    def __init__(self, console: Console) -> None:
+        from rich.progress import Progress, RenderableColumn, SpinnerColumn, TextColumn
+        from rich.text import Text
+
+        self.console = console
+        self.start_time = time.monotonic()
+        self.progress = Progress(
+            TextColumn(""),
+            SpinnerColumn("clock"),
+            RenderableColumn(Text("Probing API capabilities", style="bright_white")),
+            transient=True,
+            console=console,
+        )
+        self.progress_task_id = None
+        self.is_interrupted = False
+
+    def start(self) -> None:
+        """Start probing progress display."""
+        self.progress_task_id = self.progress.add_task("Probing", total=None)
+        self.progress.start()
+
+    def stop(self) -> None:
+        """Stop probing progress display."""
+        assert self.progress_task_id is not None
+        self.progress.stop_task(self.progress_task_id)
+        self.progress.stop()
+
+    def interrupt(self) -> None:
+        """Handle interruption during probing."""
+        self.is_interrupted = True
+        self.stop()
+
+    def get_completion_message(self) -> Text:
+        """Generate completion message including duration."""
+        from rich.style import Style
+        from rich.text import Text
+
+        duration = format_duration(int((time.monotonic() - self.start_time) * 1000))
+        if self.is_interrupted:
+            return Text.assemble(
+                ("⚡ ", Style(color="yellow")),
+                (f"API probing interrupted after {duration}", Style(color="white")),
+            )
+        return Text.assemble(
+            ("✅ ", Style(color="green")),
+            ("API capabilities:", Style(color="white")),
+        )
+
+
+@dataclass
+class WarningData:
+    missing_auth: dict[int, set[str]] = field(default_factory=dict)
+    missing_test_data: set[str] = field(default_factory=set)
+    # operations that only returned 4xx
+    validation_mismatch: set[str] = field(default_factory=set)
+
+    @property
+    def is_empty(self) -> bool:
+        return not bool(self.missing_auth or self.missing_test_data or self.validation_mismatch)
+
+
+@dataclass
+class OperationProgress:
+    """Tracks individual operation progress."""
+
+    label: str
+    start_time: float
+    task_id: TaskID
+
+    __slots__ = ("label", "start_time", "task_id")
+
+
+@dataclass
+class UnitTestProgressManager:
+    """Manages progress display for unit tests."""
+
+    console: Console
+    title: str
+    current: int
+    total: int
+    start_time: float
+
+    # Progress components
+    title_progress: Progress
+    progress_bar: Progress
+    operations_progress: Progress
+    current_operations: dict[str, OperationProgress]
+    stats: dict[Status, int]
+    stats_progress: Progress
+    live: Live | None
+
+    # Task IDs
+    title_task_id: TaskID | None
+    progress_task_id: TaskID | None
+    stats_task_id: TaskID
+
+    is_interrupted: bool
+
+    __slots__ = (
+        "console",
+        "title",
+        "current",
+        "total",
+        "start_time",
+        "title_progress",
+        "progress_bar",
+        "operations_progress",
+        "current_operations",
+        "stats",
+        "stats_progress",
+        "live",
+        "title_task_id",
+        "progress_task_id",
+        "stats_task_id",
+        "is_interrupted",
+    )
+
+    def __init__(
+        self,
+        *,
+        console: Console,
+        title: str,
+        total: int,
+    ) -> None:
+        from rich.progress import (
+            BarColumn,
+            Progress,
+            SpinnerColumn,
+            TextColumn,
+            TimeElapsedColumn,
+        )
+        from rich.style import Style
+
+        self.console = console
+        self.title = title
+        self.current = 0
+        self.total = total
+        self.start_time = time.monotonic()
+
+        # Initialize progress displays
+        self.title_progress = Progress(
+            TextColumn(""),
+            SpinnerColumn("clock"),
+            TextColumn("{task.description}", style=Style(color="white")),
+            console=self.console,
+        )
+        self.title_task_id = None
+
+        self.progress_bar = Progress(
+            TextColumn(" "),
+            TimeElapsedColumn(),
+            BarColumn(bar_width=None),
+            TextColumn("{task.percentage:.0f}% ({task.completed}/{task.total})"),
+            console=self.console,
+        )
+        self.progress_task_id = None
+
+        self.operations_progress = Progress(
+            TextColumn(" "),
+            SpinnerColumn("dots"),
+            TimeElapsedColumn(),
+            TextColumn(" {task.fields[label]}"),
+            console=self.console,
+        )
+
+        self.current_operations = {}
+
+        self.stats_progress = Progress(
+            TextColumn(" "),
+            TextColumn("{task.description}"),
+            console=self.console,
+        )
+        self.stats_task_id = self.stats_progress.add_task("")
+        self.stats = {
+            Status.SUCCESS: 0,
+            Status.FAILURE: 0,
+            Status.SKIP: 0,
+            Status.ERROR: 0,
+            Status.INTERRUPTED: 0,
+        }
+        self._update_stats_display()
+
+        self.live = None
+        self.is_interrupted = False
+
+    def _get_stats_message(self) -> str:
+        width = len(str(self.total))
+
+        parts = []
+        if self.stats[Status.SUCCESS]:
+            parts.append(f"✅ {self.stats[Status.SUCCESS]:{width}d} passed")
+        if self.stats[Status.FAILURE]:
+            parts.append(f"❌ {self.stats[Status.FAILURE]:{width}d} failed")
+        if self.stats[Status.ERROR]:
+            suffix = "s" if self.stats[Status.ERROR] > 1 else ""
+            parts.append(f"🚫 {self.stats[Status.ERROR]:{width}d} error{suffix}")
+        if self.stats[Status.SKIP] or self.stats[Status.INTERRUPTED]:
+            parts.append(f"⏭ {self.stats[Status.SKIP] + self.stats[Status.INTERRUPTED]:{width}d} skipped")
+        return " ".join(parts)
+
+    def _update_stats_display(self) -> None:
+        """Update the statistics display."""
+        self.stats_progress.update(self.stats_task_id, description=self._get_stats_message())
+
+    def start(self) -> None:
+        """Start progress display."""
+        from rich.console import Group
+        from rich.live import Live
+        from rich.text import Text
+
+        group = Group(
+            self.title_progress,
+            Text(),
+            self.progress_bar,
+            Text(),
+            self.operations_progress,
+            Text(),
+            self.stats_progress,
+        )
+
+        self.live = Live(group, refresh_per_second=10, console=self.console, transient=True)
+        self.live.start()
+
+        # Initialize both progress displays
+        self.title_task_id = self.title_progress.add_task(self.title, total=self.total)
+        self.progress_task_id = self.progress_bar.add_task(
+            "", # Empty description as it's shown in title
+            total=self.total,
+        )
+
+    def update_progress(self) -> None:
+        """Update progress in both displays."""
+        assert self.title_task_id is not None
+        assert self.progress_task_id is not None
+
+        self.current += 1
+        self.title_progress.update(self.title_task_id, completed=self.current)
+        self.progress_bar.update(self.progress_task_id, completed=self.current)
+
+    def start_operation(self, label: str) -> None:
+        """Start tracking new operation."""
+        task_id = self.operations_progress.add_task("", label=label, start_time=time.monotonic())
+        self.current_operations[label] = OperationProgress(label=label, start_time=time.monotonic(), task_id=task_id)
+
+    def finish_operation(self, label: str) -> None:
+        """Finish tracking operation."""
+        if operation := self.current_operations.pop(label, None):
+            if not self.current_operations:
+                assert self.title_task_id is not None
+                if self.current == self.total - 1:
+                    description = f" {self.title}"
+                else:
+                    description = self.title
+                self.title_progress.update(self.title_task_id, description=description)
+            self.operations_progress.update(operation.task_id, visible=False)
+
+    def update_stats(self, status: Status) -> None:
+        """Update statistics for a finished scenario."""
+        self.stats[status] += 1
+        self._update_stats_display()
+
+    def interrupt(self) -> None:
+        self.is_interrupted = True
+        self.stats[Status.SKIP] += self.total - self.current
+        if self.live:
+            self.stop()
+
+    def stop(self) -> None:
+        """Stop all progress displays."""
+        if self.live:
+            self.live.stop()
+
+    def _get_status_icon(self, default_icon: str = "🕛") -> str:
+        if self.is_interrupted:
+            icon = "⚡"
+        elif self.stats[Status.ERROR] > 0:
+            icon = "🚫"
+        elif self.stats[Status.FAILURE] > 0:
+            icon = "❌"
+        elif self.stats[Status.SUCCESS] > 0:
+            icon = "✅"
+        elif self.stats[Status.SKIP] > 0:
+            icon = "⏭ "
+        else:
+            icon = default_icon
+        return icon
+
+    def get_completion_message(self, default_icon: str = "🕛") -> str:
+        """Complete the phase and return status message."""
+        duration = format_duration(int((time.monotonic() - self.start_time) * 1000))
+        icon = self._get_status_icon(default_icon)
+
+        message = self._get_stats_message() or "No tests were run"
+        if self.is_interrupted:
+            duration_message = f"interrupted after {duration}"
+        else:
+            duration_message = f"in {duration}"
+
+        return f"{icon} {self.title} ({duration_message})\n\n {message}"
+
+
+@dataclass
+class StatefulProgressManager:
+    """Manages progress display for stateful testing."""
+
+    console: Console
+    title: str
+    links_selected: int
+    links_total: int
+    start_time: float
+
+    # Progress components
+    title_progress: Progress
+    progress_bar: Progress
+    stats_progress: Progress
+    live: Live | None
+
+    # Task IDs
+    title_task_id: TaskID | None
+    progress_task_id: TaskID | None
+    stats_task_id: TaskID
+
+    # State
+    scenarios: int
+    links_covered: set[str]
+    stats: dict[Status, int]
+    is_interrupted: bool
+
+    __slots__ = (
+        "console",
+        "title",
+        "links_selected",
+        "links_total",
+        "start_time",
+        "title_progress",
+        "progress_bar",
+        "stats_progress",
+        "live",
+        "title_task_id",
+        "progress_task_id",
+        "stats_task_id",
+        "scenarios",
+        "links_covered",
+        "stats",
+        "is_interrupted",
+    )
+
+    def __init__(self, *, console: Console, title: str, links_selected: int, links_total: int) -> None:
+        from rich.progress import Progress, SpinnerColumn, TextColumn, TimeElapsedColumn
+        from rich.style import Style
+
+        self.console = console
+        self.title = title
+        self.links_selected = links_selected
+        self.links_total = links_total
+        self.start_time = time.monotonic()
+
+        self.title_progress = Progress(
+            TextColumn(""),
+            SpinnerColumn("clock"),
+            TextColumn("{task.description}", style=Style(color="bright_white")),
+            console=self.console,
+        )
+        self.title_task_id = None
+
+        self.progress_bar = Progress(
+            TextColumn(" "),
+            TimeElapsedColumn(),
+            TextColumn("{task.fields[scenarios]:3d} scenarios • {task.fields[links]}"),
+            console=self.console,
+        )
+        self.progress_task_id = None
+
+        # Initialize stats progress
+        self.stats_progress = Progress(
+            TextColumn(" "),
+            TextColumn("{task.description}"),
+            console=self.console,
+        )
+        self.stats_task_id = self.stats_progress.add_task("")
+
+        self.live = None
+
+        # Initialize state
+        self.scenarios = 0
+        self.links_covered = set()
+        self.stats = {
+            Status.SUCCESS: 0,
+            Status.FAILURE: 0,
+            Status.ERROR: 0,
+            Status.SKIP: 0,
+        }
+        self.is_interrupted = False
+
+    def start(self) -> None:
+        """Start progress display."""
+        from rich.console import Group
+        from rich.live import Live
+        from rich.text import Text
+
+        # Initialize progress displays
+        self.title_task_id = self.title_progress.add_task("Stateful")
+        self.progress_task_id = self.progress_bar.add_task(
+            "", scenarios=0, links=f"0 covered / {self.links_selected} selected / {self.links_total} total links"
+        )
+
+        # Create live display
+        group = Group(
+            self.title_progress,
+            Text(),
+            self.progress_bar,
+            Text(),
+            self.stats_progress,
+        )
+        self.live = Live(group, refresh_per_second=10, console=self.console, transient=True)
+        self.live.start()
+
+    def stop(self) -> None:
+        """Stop progress display."""
+        if self.live:
+            self.live.stop()
+
+    def update(self, links_covered: set[str], status: Status | None = None) -> None:
+        """Update progress and stats."""
+        self.scenarios += 1
+        self.links_covered.update(links_covered)
+
+        if status is not None:
+            self.stats[status] += 1
+
+        self._update_progress_display()
+        self._update_stats_display()
+
+    def _update_progress_display(self) -> None:
+        """Update the progress display."""
+        assert self.progress_task_id is not None
+        self.progress_bar.update(
+            self.progress_task_id,
+            scenarios=self.scenarios,
+            links=f"{len(self.links_covered)} covered / {self.links_selected} selected / {self.links_total} total links",
+        )
+
+    def _get_stats_message(self) -> str:
+        """Get formatted stats message."""
+        parts = []
+        if self.stats[Status.SUCCESS]:
+            parts.append(f"✅ {self.stats[Status.SUCCESS]} passed")
+        if self.stats[Status.FAILURE]:
+            parts.append(f"❌ {self.stats[Status.FAILURE]} failed")
+        if self.stats[Status.ERROR]:
+            suffix = "s" if self.stats[Status.ERROR] > 1 else ""
+            parts.append(f"🚫 {self.stats[Status.ERROR]} error{suffix}")
+        if self.stats[Status.SKIP]:
+            parts.append(f"⏭ {self.stats[Status.SKIP]} skipped")
+        return " ".join(parts)
+
+    def _update_stats_display(self) -> None:
+        """Update the statistics display."""
+        self.stats_progress.update(self.stats_task_id, description=self._get_stats_message())
+
+    def _get_status_icon(self, default_icon: str = "🕛") -> str:
+        if self.is_interrupted:
+            icon = "⚡"
+        elif self.stats[Status.ERROR] > 0:
+            icon = "🚫"
+        elif self.stats[Status.FAILURE] > 0:
+            icon = "❌"
+        elif self.stats[Status.SUCCESS] > 0:
+            icon = "✅"
+        elif self.stats[Status.SKIP] > 0:
+            icon = "⏭ "
+        else:
+            icon = default_icon
+        return icon
+
+    def interrupt(self) -> None:
+        """Handle interruption."""
+        self.is_interrupted = True
+        if self.live:
+            self.stop()
+
+    def get_completion_message(self, icon: str | None = None) -> tuple[str, str]:
+        """Complete the phase and return status message."""
+        duration = format_duration(int((time.monotonic() - self.start_time) * 1000))
+        icon = icon or self._get_status_icon()
+
+        message = self._get_stats_message() or "No tests were run"
+        if self.is_interrupted:
+            duration_message = f"interrupted after {duration}"
+        else:
+            duration_message = f"in {duration}"
+
+        return f"{icon} {self.title} ({duration_message})", message
+
+
+def format_duration(duration_ms: int) -> str:
+    """Format duration in milliseconds to seconds with 2 decimal places."""
+    return f"{duration_ms / 1000:.2f}s"
+
+
+@dataclass
+class OutputHandler(EventHandler):
+    config: ProjectConfig
+
+    loading_manager: LoadingProgressManager | None = None
+    probing_manager: ProbingProgressManager | None = None
+    unit_tests_manager: UnitTestProgressManager | None = None
+    stateful_tests_manager: StatefulProgressManager | None = None
+
+    statistic: ApiStatistic | None = None
+    skip_reasons: list[str] = field(default_factory=list)
+    warnings: WarningData = field(default_factory=WarningData)
+    errors: set[events.NonFatalError] = field(default_factory=set)
+    phases: dict[PhaseName, tuple[Status, PhaseSkipReason | None]] = field(
+        default_factory=lambda: dict.fromkeys(PhaseName, (Status.SKIP, None))
+    )
+    console: Console = field(default_factory=_default_console)
+
+    def handle_event(self, ctx: ExecutionContext, event: events.EngineEvent) -> None:
+        if isinstance(event, events.PhaseStarted):
+            self._on_phase_started(event)
+        elif isinstance(event, events.PhaseFinished):
+            self._on_phase_finished(event)
+        elif isinstance(event, events.ScenarioStarted):
+            self._on_scenario_started(event)
+        elif isinstance(event, events.ScenarioFinished):
+            self._on_scenario_finished(ctx, event)
+        if isinstance(event, events.EngineFinished):
+            self._on_engine_finished(ctx, event)
+        elif isinstance(event, events.Interrupted):
+            self._on_interrupted(event)
+        elif isinstance(event, events.FatalError):
+            self._on_fatal_error(ctx, event)
+        elif isinstance(event, events.NonFatalError):
+            self.errors.add(event)
+        elif isinstance(event, LoadingStarted):
+            self._on_loading_started(event)
+        elif isinstance(event, LoadingFinished):
+            self._on_loading_finished(ctx, event)
+
+    def start(self, ctx: ExecutionContext) -> None:
+        display_header(SCHEMATHESIS_VERSION)
+
+    def shutdown(self, ctx: ExecutionContext) -> None:
+        if self.unit_tests_manager is not None:
+            self.unit_tests_manager.stop()
+        if self.stateful_tests_manager is not None:
+            self.stateful_tests_manager.stop()
+        if self.loading_manager is not None:
+            self.loading_manager.stop()
+        if self.probing_manager is not None:
+            self.probing_manager.stop()
+
+    def _on_loading_started(self, event: LoadingStarted) -> None:
+        self.loading_manager = LoadingProgressManager(console=self.console, location=event.location)
+        self.loading_manager.start()
+
+    def _on_loading_finished(self, ctx: ExecutionContext, event: LoadingFinished) -> None:
+        from rich.padding import Padding
+        from rich.style import Style
+        from rich.table import Table
+
+        self.config = event.config
+
+        assert self.loading_manager is not None
+        self.loading_manager.stop()
+
+        message = Padding(
+            self.loading_manager.get_completion_message(),
+            BLOCK_PADDING,
+        )
+        self.console.print(message)
+        self.console.print()
+        self.loading_manager = None
+        self.statistic = event.statistic
+
+        table = Table(
+            show_header=False,
+            box=None,
+            padding=(0, 4),
+            collapse_padding=True,
+        )
+        table.add_column("Field", style=Style(color="bright_white", bold=True))
+        table.add_column("Value", style="cyan")
+
+        table.add_row("Base URL:", event.base_url)
+        table.add_row("Specification:", event.specification.name)
+        statistic = event.statistic.operations
+        table.add_row("Operations:", f"{statistic.selected} selected / {statistic.total} total")
+
+        message = Padding(table, BLOCK_PADDING)
+        self.console.print(message)
+        self.console.print()
+
+        if ctx.initialization_lines:
+            _print_lines(ctx.initialization_lines)
+
+    def _on_phase_started(self, event: events.PhaseStarted) -> None:
+        phase = event.phase
+        if phase.name == PhaseName.PROBING and phase.is_enabled:
+            self._start_probing()
+        elif phase.name in [PhaseName.EXAMPLES, PhaseName.COVERAGE, PhaseName.FUZZING] and phase.is_enabled:
+            self._start_unit_tests(phase.name)
+        elif phase.name == PhaseName.STATEFUL_TESTING and phase.is_enabled and phase.skip_reason is None:
+            self._start_stateful_tests()
+
+    def _start_probing(self) -> None:
+        self.probing_manager = ProbingProgressManager(console=self.console)
+        self.probing_manager.start()
+
+    def _start_unit_tests(self, phase: PhaseName) -> None:
+        assert self.statistic is not None
+        assert self.unit_tests_manager is None
+        self.unit_tests_manager = UnitTestProgressManager(
+            console=self.console,
+            title=phase.value,
+            total=self.statistic.operations.selected,
+        )
+        self.unit_tests_manager.start()
+
+    def _start_stateful_tests(self) -> None:
+        assert self.statistic is not None
+        self.stateful_tests_manager = StatefulProgressManager(
+            console=self.console,
+            title="Stateful",
+            links_selected=self.statistic.links.selected,
+            links_total=self.statistic.links.total,
+        )
+        self.stateful_tests_manager.start()
+
+    def _on_phase_finished(self, event: events.PhaseFinished) -> None:
+        from rich.padding import Padding
+        from rich.style import Style
+        from rich.table import Table
+        from rich.text import Text
+
+        phase = event.phase
+        self.phases[phase.name] = (event.status, phase.skip_reason)
+
+        if phase.name == PhaseName.PROBING:
+            assert self.probing_manager is not None
+            self.probing_manager.stop()
+            self.probing_manager = None
+
+            if event.status == Status.SUCCESS:
+                assert isinstance(event.payload, Ok)
+                payload = event.payload.ok()
+                self.console.print(
+                    Padding(
+                        Text.assemble(
+                            ("✅ ", Style(color="green")),
+                            ("API capabilities:", Style(color="bright_white")),
+                        ),
+                        BLOCK_PADDING,
+                    )
+                )
+                self.console.print()
+
+                table = Table(
+                    show_header=False,
+                    box=None,
+                    padding=(0, 4),
+                    collapse_padding=True,
+                )
+                table.add_column("Capability", style=Style(color="bright_white", bold=True))
+                table.add_column("Status", style="cyan")
+                for probe_run in payload.probes:
+                    icon, style = {
+                        ProbeOutcome.SUCCESS: ("✓", Style(color="green")),
+                        ProbeOutcome.FAILURE: ("✘", Style(color="red")),
+                        ProbeOutcome.SKIP: ("⊘", Style(color="yellow")),
+                        ProbeOutcome.ERROR: ("⚠", Style(color="yellow")),
+                    }[probe_run.outcome]
+
+                    table.add_row(f"{probe_run.probe.name}:", Text(icon, style=style))
+
+                message = Padding(table, BLOCK_PADDING)
+            elif event.status == Status.SKIP:
+                message = Padding(
+                    Text.assemble(
+                        ("⏭ ", ""),
+                        ("API probing skipped", Style(color="yellow")),
+                    ),
+                    BLOCK_PADDING,
+                )
+            else:
+                assert event.status == Status.ERROR
+                assert isinstance(event.payload, Err)
+                error = EngineErrorInfo(event.payload.err())
+                message = Padding(
+                    Text.assemble(
+                        ("🚫 ", ""),
+                        (f"API probing failed: {error.message}", Style(color="red")),
+                    ),
+                    BLOCK_PADDING,
+                )
+            self.console.print(message)
+            self.console.print()
+        elif phase.name == PhaseName.STATEFUL_TESTING and phase.is_enabled and self.stateful_tests_manager is not None:
+            self.stateful_tests_manager.stop()
+            if event.status == Status.ERROR:
+                title, summary = self.stateful_tests_manager.get_completion_message("🚫")
+            else:
+                title, summary = self.stateful_tests_manager.get_completion_message()
+
+            self.console.print(Padding(Text(title, style="bright_white"), BLOCK_PADDING))
+
+            table = Table(
+                show_header=False,
+                box=None,
+                padding=(0, 4),
+                collapse_padding=True,
+            )
+            table.add_column("Field", style=Style(color="bright_white", bold=True))
+            table.add_column("Value", style="cyan")
+            table.add_row("Scenarios:", f"{self.stateful_tests_manager.scenarios}")
+            table.add_row(
+                "API Links:",
+                f"{len(self.stateful_tests_manager.links_covered)} covered / {self.stateful_tests_manager.links_selected} selected / {self.stateful_tests_manager.links_total} total",
+            )
+
+            self.console.print()
+            self.console.print(Padding(table, BLOCK_PADDING))
+            self.console.print()
+            self.console.print(Padding(Text(summary, style="bright_white"), (0, 0, 0, 5)))
+            self.console.print()
+            self.stateful_tests_manager = None
+        elif (
+            phase.name in [PhaseName.EXAMPLES, PhaseName.COVERAGE, PhaseName.FUZZING]
+            and phase.is_enabled
+            and self.unit_tests_manager is not None
+        ):
+            self.unit_tests_manager.stop()
+            if event.status == Status.ERROR:
+                message = self.unit_tests_manager.get_completion_message("🚫")
+            else:
+                message = self.unit_tests_manager.get_completion_message()
+            self.console.print(Padding(Text(message, style="white"), BLOCK_PADDING))
+            self.console.print()
+            self.unit_tests_manager = None
+
+    def _on_scenario_started(self, event: events.ScenarioStarted) -> None:
+        if event.phase in [PhaseName.EXAMPLES, PhaseName.COVERAGE, PhaseName.FUZZING]:
+            # We should display execution result + percentage in the end. For example:
+            assert event.label is not None
+            assert self.unit_tests_manager is not None
+            self.unit_tests_manager.start_operation(event.label)
+
+    def _on_scenario_finished(self, ctx: ExecutionContext, event: events.ScenarioFinished) -> None:
+        if event.phase in [PhaseName.EXAMPLES, PhaseName.COVERAGE, PhaseName.FUZZING]:
+            assert self.unit_tests_manager is not None
+            if event.label:
+                self.unit_tests_manager.finish_operation(event.label)
+            self.unit_tests_manager.update_progress()
+            self.unit_tests_manager.update_stats(event.status)
+            if event.status == Status.SKIP and event.skip_reason is not None:
+                self.skip_reasons.append(event.skip_reason)
+            self._check_warnings(ctx, event)
+        elif (
+            event.phase == PhaseName.STATEFUL_TESTING
+            and not event.is_final
+            and event.status not in (Status.INTERRUPTED, Status.SKIP, None)
+        ):
+            assert self.stateful_tests_manager is not None
+            links_seen = {case.transition.id for case in event.recorder.cases.values() if case.transition is not None}
+            self.stateful_tests_manager.update(links_seen, event.status)
+            self._check_stateful_warnings(ctx, event)
+
+    def _check_warnings(self, ctx: ExecutionContext, event: events.ScenarioFinished) -> None:
+        statistic = aggregate_status_codes(event.recorder.interactions.values())
+
+        if statistic.total == 0:
+            return
+
+        assert ctx.find_operation_by_label is not None
+        assert event.label is not None
+        operation = ctx.find_operation_by_label(event.label)
+
+        warnings = self.config.warnings_for(operation=operation)
+
+        if SchemathesisWarning.MISSING_AUTH in warnings:
+            for status_code in (401, 403):
+                if statistic.ratio_for(status_code) >= AUTH_ERRORS_THRESHOLD:
+                    self.warnings.missing_auth.setdefault(status_code, set()).add(event.recorder.label)
+
+        # Warn if all positive test cases got 4xx in return and no failure was found
+        def all_positive_are_rejected(recorder: ScenarioRecorder) -> bool:
+            seen_positive = False
+            for case in recorder.cases.values():
+                if not (case.value.meta is not None and case.value.meta.generation.mode == GenerationMode.POSITIVE):
+                    continue
+                seen_positive = True
+                interaction = recorder.interactions.get(case.value.id)
+                if not (interaction is not None and interaction.response is not None):
+                    continue
+                # At least one positive response for positive test case
+                if 200 <= interaction.response.status_code < 300:
+                    return False
+            # If there are positive test cases, and we ended up here, then there are no 2xx responses for them
+            # Otherwise, there are no positive test cases at all and this check should pass
+            return seen_positive
+
+        if (
+            event.status == Status.SUCCESS
+            and (
+                SchemathesisWarning.MISSING_TEST_DATA in warnings or SchemathesisWarning.VALIDATION_MISMATCH in warnings
+            )
+            and GenerationMode.POSITIVE in self.config.generation_for(operation=operation, phase=event.phase.name).modes
+            and all_positive_are_rejected(event.recorder)
+        ):
+            if SchemathesisWarning.MISSING_TEST_DATA in warnings and statistic.should_warn_about_missing_test_data():
+                self.warnings.missing_test_data.add(event.recorder.label)
+            if (
+                SchemathesisWarning.VALIDATION_MISMATCH in warnings
+                and statistic.should_warn_about_validation_mismatch()
+            ):
+                self.warnings.validation_mismatch.add(event.recorder.label)
+
+    def _check_stateful_warnings(self, ctx: ExecutionContext, event: events.ScenarioFinished) -> None:
+        # If stateful testing had successful responses for API operations that were marked with "missing_test_data"
+        # warnings, then remove them from warnings
+        for key, node in event.recorder.cases.items():
+            if not self.warnings.missing_test_data:
+                break
+            if node.value.operation.label in self.warnings.missing_test_data and key in event.recorder.interactions:
+                response = event.recorder.interactions[key].response
+                if response is not None and response.status_code < 300:
+                    self.warnings.missing_test_data.remove(node.value.operation.label)
+                    continue
+
+    def _on_interrupted(self, event: events.Interrupted) -> None:
+        from rich.padding import Padding
+
+        if self.unit_tests_manager is not None:
+            self.unit_tests_manager.interrupt()
+        elif self.stateful_tests_manager is not None:
+            self.stateful_tests_manager.interrupt()
+        elif self.loading_manager is not None:
+            self.loading_manager.interrupt()
+            message = Padding(
+                self.loading_manager.get_completion_message(),
+                BLOCK_PADDING,
+            )
+            self.console.print(message)
+            self.console.print()
+        elif self.probing_manager is not None:
+            self.probing_manager.interrupt()
+            message = Padding(
+                self.probing_manager.get_completion_message(),
+                BLOCK_PADDING,
+            )
+            self.console.print(message)
+            self.console.print()
+            self.probing_manager = None
+
def _on_fatal_error(self, ctx: ExecutionContext, event: events.FatalError) -> None:
|
1132
|
+
from rich.padding import Padding
|
1133
|
+
from rich.text import Text
|
1134
|
+
|
1135
|
+
self.shutdown(ctx)
|
1136
|
+
|
1137
|
+
if isinstance(event.exception, LoaderError):
|
1138
|
+
assert self.loading_manager is not None
|
1139
|
+
message = Padding(self.loading_manager.get_error_message(event.exception), BLOCK_PADDING)
|
1140
|
+
self.console.print(message)
|
1141
|
+
self.console.print()
|
1142
|
+
self.loading_manager = None
|
1143
|
+
|
1144
|
+
if event.exception.extras:
|
1145
|
+
for extra in event.exception.extras:
|
1146
|
+
self.console.print(Padding(Text(extra), (0, 0, 0, 5)))
|
1147
|
+
self.console.print()
|
1148
|
+
|
1149
|
+
if not (
|
1150
|
+
event.exception.kind == LoaderErrorKind.CONNECTION_OTHER and self.config.wait_for_schema is not None
|
1151
|
+
):
|
1152
|
+
suggestion = LOADER_ERROR_SUGGESTIONS.get(event.exception.kind)
|
1153
|
+
if suggestion is not None:
|
1154
|
+
click.echo(_style(f"{click.style('Tip:', bold=True, fg='green')} {suggestion}"))
|
1155
|
+
|
1156
|
+
raise click.Abort
|
1157
|
+
title = "Test Execution Error"
|
1158
|
+
message = DEFAULT_INTERNAL_ERROR_MESSAGE
|
1159
|
+
traceback = format_exception(event.exception, with_traceback=True)
|
1160
|
+
extras = split_traceback(traceback)
|
1161
|
+
suggestion = f"Please consider reporting the traceback above to our issue tracker:\n\n {ISSUE_TRACKER_URL}."
|
1162
|
+
click.echo(_style(title, fg="red", bold=True))
|
1163
|
+
click.echo()
|
1164
|
+
click.echo(message)
|
1165
|
+
_display_extras(extras)
|
1166
|
+
if not (
|
1167
|
+
isinstance(event.exception, LoaderError)
|
1168
|
+
and event.exception.kind == LoaderErrorKind.CONNECTION_OTHER
|
1169
|
+
and self.config.wait_for_schema is not None
|
1170
|
+
):
|
1171
|
+
click.echo(_style(f"\n{click.style('Tip:', bold=True, fg='green')} {suggestion}"))
|
1172
|
+
|
1173
|
+
raise click.Abort
|
1174
|
+
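Note: the fatal-error branch above follows a standard Click pattern: print a styled explanation, optionally a green "Tip:", then raise `click.Abort` so the command exits with a non-zero code instead of dumping a raw Python traceback. A minimal standalone sketch of that pattern (only the `click` calls are real APIs; the function and messages are illustrative):

```python
from __future__ import annotations

import click


def fail_with_tip(message: str, tip: str | None = None) -> None:
    """Print an error message, optionally a green tip, then abort with a non-zero exit code."""
    click.echo(click.style("Error", fg="red", bold=True))
    click.echo()
    click.echo(message)
    if tip is not None:
        click.echo(f"\n{click.style('Tip:', bold=True, fg='green')} {tip}")
    raise click.Abort


@click.command()
def main() -> None:
    fail_with_tip(
        "Failed to load the API schema",
        tip="Check that the service is running and the schema URL is correct",
    )


if __name__ == "__main__":
    main()  # Click catches Abort, prints "Aborted!" and exits with code 1
```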
1175 +     def _display_warning_block(
1176 +         self, title: str, operations: set[str] | dict, tips: list[str], operation_suffix: str = ""
1177 +     ) -> None:
1178 +         if isinstance(operations, dict):
1179 +             total = sum(len(ops) for ops in operations.values())
1180 +         else:
1181 +             total = len(operations)
1182 +
1183 +         suffix = "" if total == 1 else "s"
1184 +         click.echo(
1185 +             _style(
1186 +                 f"{title}: {total} operation{suffix}{operation_suffix}\n",
1187 +                 fg="yellow",
1188 +             )
1189 +         )
1190 +
1191 +         # Print up to 3 endpoints, then "+N more"
1192 +         def _print_up_to_three(operations_: list[str] | set[str]) -> None:
1193 +             for operation in sorted(operations_)[:3]:
1194 +                 click.echo(_style(f"  - {operation}", fg="yellow"))
1195 +             extra_count = len(operations_) - 3
1196 +             if extra_count > 0:
1197 +                 click.echo(_style(f"  + {extra_count} more", fg="yellow"))
1198 +
1199 +         if isinstance(operations, dict):
1200 +             for status_code, ops in operations.items():
1201 +                 status_text = "Unauthorized" if status_code == 401 else "Forbidden"
1202 +                 count = len(ops)
1203 +                 suffix = "" if count == 1 else "s"
1204 +                 click.echo(_style(f"{status_code} {status_text} ({count} operation{suffix}):", fg="yellow"))
1205 +
1206 +                 _print_up_to_three(ops)
1207 +         else:
1208 +             _print_up_to_three(operations)
1209 +
1210 +         if tips:
1211 +             click.echo()
1212 +
1213 +             for tip in tips:
1214 +                 click.echo(_style(tip, fg="yellow"))
1215 +
1216 +         click.echo()
1217 +
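Note: `_print_up_to_three` implements a small truncation pattern (a sorted head plus a "+N more" tail) that keeps warning blocks short no matter how many operations are affected. A self-contained sketch of the same idea, with plain `print` standing in for the styled output:

```python
from __future__ import annotations


def print_truncated(items: set[str] | list[str], limit: int = 3) -> None:
    """Print up to `limit` items in sorted order, then a '+N more' line for the rest."""
    ordered = sorted(items)
    for item in ordered[:limit]:
        print(f"  - {item}")
    remaining = len(ordered) - limit
    if remaining > 0:
        print(f"  + {remaining} more")


print_truncated({"GET /users", "POST /users", "GET /orders", "DELETE /orders/{id}", "GET /items"})
# Three sorted operations, then "  + 2 more"
```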
1218 +     def display_warnings(self) -> None:
1219 +         display_section_name("WARNINGS")
1220 +         click.echo()
1221 +         if self.warnings.missing_auth:
1222 +             self._display_warning_block(
1223 +                 title="Missing authentication",
1224 +                 operations=self.warnings.missing_auth,
1225 +                 operation_suffix=" returned authentication errors",
1226 +                 tips=["💡 Use --auth or -H to provide authentication credentials"],
1227 +             )
1228 +
1229 +         if self.warnings.missing_test_data:
1230 +             self._display_warning_block(
1231 +                 title="Missing test data",
1232 +                 operations=self.warnings.missing_test_data,
1233 +                 operation_suffix=" repeatedly returned 404 Not Found, preventing tests from reaching your API's core logic",
1234 +                 tips=[
1235 +                     "💡 Provide realistic parameter values in your config file so tests can access existing resources",
1236 +                 ],
1237 +             )
1238 +
1239 +         if self.warnings.validation_mismatch:
1240 +             self._display_warning_block(
1241 +                 title="Schema validation mismatch",
1242 +                 operations=self.warnings.validation_mismatch,
1243 +                 operation_suffix=" mostly rejected generated data due to validation errors, indicating schema constraints don't match API validation",
1244 +                 tips=["💡 Check your schema constraints - API validation may be stricter than documented"],
1245 +             )
1246 +
1247 +     def display_stateful_failures(self, ctx: ExecutionContext) -> None:
1248 +         display_section_name("Stateful tests")
1249 +
1250 +         click.echo("\nFailed to extract data from response:")
1251 +
1252 +         grouped: dict[str, list[ExtractionFailure]] = {}
1253 +         for failure in ctx.statistic.extraction_failures:
1254 +             grouped.setdefault(failure.id, []).append(failure)
1255 +
1256 +         for idx, (transition_id, failures) in enumerate(grouped.items(), 1):
1257 +             for failure in failures:
1258 +                 click.echo(f"\n  {idx}. Test Case ID: {failure.case_id}\n")
1259 +                 click.echo(f"  {transition_id}")
1260 +
1261 +                 indent = " "
1262 +                 if failure.error:
1263 +                     if isinstance(failure.error, JSONDecodeError):
1264 +                         click.echo(f"\n{indent}Failed to parse JSON from response")
1265 +                     else:
1266 +                         click.echo(f"\n{indent}{failure.error.__class__.__name__}: {failure.error}")
1267 +                 else:
1268 +                     description = (
1269 +                         f"\n{indent}Could not resolve parameter `{failure.parameter_name}` via `{failure.expression}`"
1270 +                     )
1271 +                     prefix = "$response.body"
1272 +                     if failure.expression.startswith(prefix):
1273 +                         description += f"\n{indent}Path `{failure.expression[len(prefix) :]}` not found in response"
1274 +                     click.echo(description)
1275 +
1276 +                 click.echo()
1277 +
1278 +                 for case, response in reversed(failure.history):
1279 +                     curl = case.as_curl_command(headers=dict(response.request.headers), verify=response.verify)
1280 +                     click.echo(f"{indent}[{response.status_code}] {curl}")
1281 +
1282 +                 response = failure.response
1283 +
1284 +                 if response.content is None or not response.content:
1285 +                     click.echo(f"\n{indent}<EMPTY>")
1286 +                 else:
1287 +                     try:
1288 +                         payload = prepare_response_payload(response.text, config=ctx.config.output)
1289 +                         click.echo(textwrap.indent(f"\n{payload}", prefix=indent))
1290 +                     except UnicodeDecodeError:
1291 +                         click.echo(f"\n{indent}<BINARY>")
1292 +
1293 +         click.echo()
1294 +
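Note: `display_stateful_failures` groups extraction failures by transition ID with `dict.setdefault` before numbering them, so each failing transition is reported once with every affected test case beneath it. A minimal sketch of that grouping step (the record type here is illustrative, not the real `ExtractionFailure`):

```python
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class FailureRecord:
    """Illustrative stand-in: which transition failed and for which test case."""

    transition_id: str
    case_id: str


def group_by_transition(failures: list[FailureRecord]) -> dict[str, list[FailureRecord]]:
    """Group failures per transition, preserving the order in which transitions first appeared."""
    grouped: dict[str, list[FailureRecord]] = {}
    for failure in failures:
        grouped.setdefault(failure.transition_id, []).append(failure)
    return grouped


records = [
    FailureRecord("GET /users/{id} -> DELETE /users/{id}", "case-1"),
    FailureRecord("GET /users/{id} -> DELETE /users/{id}", "case-2"),
    FailureRecord("POST /orders -> GET /orders/{id}", "case-3"),
]
for idx, (transition, items) in enumerate(group_by_transition(records).items(), 1):
    print(f"{idx}. {transition}: {len(items)} failure(s)")
```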
1295 +     def display_api_operations(self, ctx: ExecutionContext) -> None:
1296 +         assert self.statistic is not None
1297 +         click.echo(_style("API Operations:", bold=True))
1298 +         click.echo(
1299 +             _style(
1300 +                 f"  Selected: {click.style(str(self.statistic.operations.selected), bold=True)}/"
1301 +                 f"{click.style(str(self.statistic.operations.total), bold=True)}"
1302 +             )
1303 +         )
1304 +         click.echo(_style(f"  Tested: {click.style(str(len(ctx.statistic.tested_operations)), bold=True)}"))
1305 +         errors = len(
1306 +             {
1307 +                 err.label
1308 +                 for err in self.errors
1309 +                 # Some API operations may have some tests before they have an error
1310 +                 if err.phase in [PhaseName.EXAMPLES, PhaseName.COVERAGE, PhaseName.FUZZING]
1311 +                 and err.label not in ctx.statistic.tested_operations
1312 +                 and err.related_to_operation
1313 +             }
1314 +         )
1315 +         if errors:
1316 +             click.echo(_style(f"  Errored: {click.style(str(errors), bold=True)}"))
1317 +
1318 +         # API operations that are skipped due to fail-fast are counted here as well
1319 +         total_skips = self.statistic.operations.selected - len(ctx.statistic.tested_operations) - errors
1320 +         if total_skips:
1321 +             click.echo(_style(f"  Skipped: {click.style(str(total_skips), bold=True)}"))
1322 +             for reason in sorted(set(self.skip_reasons)):
1323 +                 click.echo(_style(f"    - {reason.rstrip('.')}"))
1324 +         click.echo()
1325 +
1326 +     def display_phases(self) -> None:
1327 +         click.echo(_style("Test Phases:", bold=True))
1328 +
1329 +         for phase in PhaseName:
1330 +             status, skip_reason = self.phases[phase]
1331 +
1332 +             if status == Status.SKIP:
1333 +                 click.echo(_style(f"  ⏭  {phase.value}", fg="yellow"), nl=False)
1334 +                 if skip_reason:
1335 +                     click.echo(_style(f" ({skip_reason.value})", fg="yellow"))
1336 +                 else:
1337 +                     click.echo()
1338 +             elif status == Status.SUCCESS:
1339 +                 click.echo(_style(f"  ✅ {phase.value}", fg="green"))
1340 +             elif status == Status.FAILURE:
1341 +                 click.echo(_style(f"  ❌ {phase.value}", fg="red"))
1342 +             elif status == Status.ERROR:
1343 +                 click.echo(_style(f"  🚫 {phase.value}", fg="red"))
1344 +             elif status == Status.INTERRUPTED:
1345 +                 click.echo(_style(f"  ⚡ {phase.value}", fg="yellow"))
1346 +         click.echo()
1347 +
1348 +     def display_test_cases(self, ctx: ExecutionContext) -> None:
1349 +         if ctx.statistic.total_cases == 0:
1350 +             click.echo(_style("Test cases:", bold=True))
1351 +             click.echo("  No test cases were generated\n")
1352 +             return
1353 +
1354 +         unique_failures = sum(
1355 +             len(group.failures) for grouped in ctx.statistic.failures.values() for group in grouped.values()
1356 +         )
1357 +         click.echo(_style("Test cases:", bold=True))
1358 +
1359 +         parts = [f"  {click.style(str(ctx.statistic.total_cases), bold=True)} generated"]
1360 +
1361 +         # Don't show pass/fail status if all cases were skipped
1362 +         if ctx.statistic.cases_without_checks == ctx.statistic.total_cases:
1363 +             parts.append(f"{click.style(str(ctx.statistic.cases_without_checks), bold=True)} skipped")
1364 +         else:
1365 +             if unique_failures > 0:
1366 +                 parts.append(
1367 +                     f"{click.style(str(ctx.statistic.cases_with_failures), bold=True)} found "
1368 +                     f"{click.style(str(unique_failures), bold=True)} unique failures"
1369 +                 )
1370 +             else:
1371 +                 parts.append(f"{click.style(str(ctx.statistic.total_cases), bold=True)} passed")
1372 +
1373 +             if ctx.statistic.cases_without_checks > 0:
1374 +                 parts.append(f"{click.style(str(ctx.statistic.cases_without_checks), bold=True)} skipped")
1375 +
1376 +         click.echo(_style(", ".join(parts) + "\n"))
1377 +
1378 +     def display_failures_summary(self, ctx: ExecutionContext) -> None:
1379 +         # Collect all unique failures and their counts by title
1380 +         failure_counts: dict[str, tuple[Severity, int]] = {}
1381 +         for grouped in ctx.statistic.failures.values():
1382 +             for group in grouped.values():
1383 +                 for failure in group.failures:
1384 +                     data = failure_counts.get(failure.title, (failure.severity, 0))
1385 +                     failure_counts[failure.title] = (failure.severity, data[1] + 1)
1386 +
1387 +         click.echo(_style("Failures:", bold=True))
1388 +
1389 +         # Sort by severity first, then by title
1390 +         sorted_failures = sorted(failure_counts.items(), key=lambda x: (x[1][0], x[0]))
1391 +
1392 +         for title, (_, count) in sorted_failures:
1393 +             click.echo(_style(f"  ❌ {title}: "), nl=False)
1394 +             click.echo(_style(str(count), bold=True))
1395 +         click.echo()
1396 +
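Note: the failures summary sorts on the tuple `(severity, title)`, which only puts the most serious issues first if the severity type is totally ordered. A small self-contained sketch of that ordering, assuming an `IntEnum`-style severity (the real `Severity` type is not shown in this hunk, and the sample titles are just plausible data):

```python
from enum import IntEnum


class Severity(IntEnum):
    """Illustrative severity ranking; lower values sort first."""

    CRITICAL = 0
    HIGH = 1
    MEDIUM = 2
    LOW = 3


failure_counts = {
    "Undocumented HTTP status code": (Severity.MEDIUM, 12),
    "Server error": (Severity.CRITICAL, 3),
    "Response violates schema": (Severity.HIGH, 7),
}

# Same key as the summary above: severity first, then title alphabetically
for title, (severity, count) in sorted(failure_counts.items(), key=lambda x: (x[1][0], x[0])):
    print(f"{severity.name:>8} {title}: {count}")
# CRITICAL Server error: 3
#     HIGH Response violates schema: 7
#   MEDIUM Undocumented HTTP status code: 12
```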
1397 +     def display_errors_summary(self) -> None:
1398 +         # Group errors by title and count occurrences
1399 +         error_counts: dict[str, int] = {}
1400 +         for error in self.errors:
1401 +             title = error.info.title
1402 +             error_counts[title] = error_counts.get(title, 0) + 1
1403 +
1404 +         click.echo(_style("Errors:", bold=True))
1405 +
1406 +         for title in sorted(error_counts):
1407 +             click.echo(_style(f"  🚫 {title}: "), nl=False)
1408 +             click.echo(_style(str(error_counts[title]), bold=True))
1409 +         click.echo()
1410 +
1411 +     def display_final_line(self, ctx: ExecutionContext, event: events.EngineFinished) -> None:
1412 +         parts = []
1413 +
1414 +         unique_failures = sum(
1415 +             len(group.failures) for grouped in ctx.statistic.failures.values() for group in grouped.values()
1416 +         )
1417 +         if unique_failures:
1418 +             suffix = "s" if unique_failures > 1 else ""
1419 +             parts.append(f"{unique_failures} failure{suffix}")
1420 +
1421 +         if self.errors:
1422 +             suffix = "s" if len(self.errors) > 1 else ""
1423 +             parts.append(f"{len(self.errors)} error{suffix}")
1424 +
1425 +         total_warnings = sum(len(endpoints) for endpoints in self.warnings.missing_auth.values())
1426 +         if total_warnings:
1427 +             suffix = "s" if total_warnings > 1 else ""
1428 +             parts.append(f"{total_warnings} warning{suffix}")
1429 +
1430 +         if parts:
1431 +             message = f"{', '.join(parts)} in {event.running_time:.2f}s"
1432 +             color = "red" if (unique_failures or self.errors) else "yellow"
1433 +         elif ctx.statistic.total_cases == 0:
1434 +             message = "Empty test suite"
1435 +             color = "yellow"
1436 +         else:
1437 +             message = f"No issues found in {event.running_time:.2f}s"
1438 +             color = "green"
1439 +
1440 +         display_section_name(message, fg=color)
1441 +
1442 +     def display_reports(self) -> None:
1443 +         reports = self.config.reports
1444 +         if reports.vcr.enabled or reports.har.enabled or reports.junit.enabled:
1445 +             click.echo(_style("Reports:", bold=True))
1446 +             for format, report in (
1447 +                 (ReportFormat.JUNIT, reports.junit),
1448 +                 (ReportFormat.VCR, reports.vcr),
1449 +                 (ReportFormat.HAR, reports.har),
1450 +             ):
1451 +                 if report.enabled:
1452 +                     path = reports.get_path(format)
1453 +                     click.echo(_style(f"  - {format.value.upper()}: {path}"))
1454 +             click.echo()
1455 +
1456 +     def display_seed(self) -> None:
1457 +         click.echo(_style("Seed: ", bold=True), nl=False)
1458 +         # Deterministic mode can be applied to a subset of tests, but we only care if it is enabled everywhere
1459 +         # If not everywhere, then the seed matter and should be displayed
1460 +         if self.config.seed is None or self.config.generation.deterministic:
1461 +             click.echo("not used in the deterministic mode")
1462 +         else:
1463 +             click.echo(str(self.config.seed))
1464 +         click.echo()
1465 +
1466 +     def _on_engine_finished(self, ctx: ExecutionContext, event: events.EngineFinished) -> None:
1467 +         assert self.loading_manager is None
1468 +         assert self.probing_manager is None
1469 +         assert self.unit_tests_manager is None
1470 +         assert self.stateful_tests_manager is None
1471 +         if self.errors:
1472 +             display_section_name("ERRORS")
1473 +             errors = sorted(self.errors, key=lambda r: (r.phase.value, r.label, r.info.title))
1474 +             for label, group_errors in groupby(errors, key=lambda r: r.label):
1475 +                 display_section_name(label, "_", fg="red")
1476 +                 _errors = list(group_errors)
1477 +                 for idx, error in enumerate(_errors, 1):
1478 +                     click.echo(error.info.format(bold=lambda x: click.style(x, bold=True)))
1479 +                     if idx < len(_errors):
1480 +                         click.echo()
1481 +             click.echo(
1482 +                 _style(
1483 +                     f"\nNeed more help?\n Join our Discord server: {DISCORD_LINK}",
1484 +                     fg="red",
1485 +                 )
1486 +             )
1487 +         display_failures(ctx)
1488 +         if not self.warnings.is_empty:
1489 +             self.display_warnings()
1490 +         if ctx.statistic.extraction_failures:
1491 +             self.display_stateful_failures(ctx)
1492 +         display_section_name("SUMMARY")
1493 +         click.echo()
1494 +
1495 +         if self.statistic:
1496 +             self.display_api_operations(ctx)
1497 +
1498 +         self.display_phases()
1499 +
1500 +         if ctx.statistic.failures:
1501 +             self.display_failures_summary(ctx)
1502 +
1503 +         if self.errors:
1504 +             self.display_errors_summary()
1505 +
1506 +         if not self.warnings.is_empty:
1507 +             click.echo(_style("Warnings:", bold=True))
1508 +
1509 +             if self.warnings.missing_auth:
1510 +                 affected = sum(len(operations) for operations in self.warnings.missing_auth.values())
1511 +                 suffix = "" if affected == 1 else "s"
1512 +                 click.echo(
1513 +                     _style(
1514 +                         f"  ⚠️ Missing authentication: {bold(str(affected))} operation{suffix} returned only 401/403 responses",
1515 +                         fg="yellow",
1516 +                     )
1517 +                 )
1518 +
1519 +             if self.warnings.missing_test_data:
1520 +                 count = len(self.warnings.missing_test_data)
1521 +                 suffix = "" if count == 1 else "s"
1522 +                 click.echo(
1523 +                     _style(
1524 +                         f"  ⚠️ Missing valid test data: {bold(str(count))} operation{suffix} repeatedly returned 404 responses",
1525 +                         fg="yellow",
1526 +                     )
1527 +                 )
1528 +
1529 +             if self.warnings.validation_mismatch:
1530 +                 count = len(self.warnings.validation_mismatch)
1531 +                 suffix = "" if count == 1 else "s"
1532 +                 click.echo(
1533 +                     _style(
1534 +                         f"  ⚠️ Schema validation mismatch: {bold(str(count))} operation{suffix} mostly rejected generated data",
1535 +                         fg="yellow",
1536 +                     )
1537 +                 )
1538 +
1539 +             click.echo()
1540 +
1541 +         if ctx.summary_lines:
1542 +             _print_lines(ctx.summary_lines)
1543 +             click.echo()
1544 +
1545 +         self.display_test_cases(ctx)
1546 +         self.display_reports()
1547 +         self.display_seed()
1548 +         self.display_final_line(ctx, event)
1549 +
1550 +
1551 + @dataclass
1552 + class StatusCodeStatistic:
1553 +     """Statistics about HTTP status codes in a scenario."""
1554 +
1555 +     counts: dict[int, int]
1556 +     total: int
1557 +
1558 +     __slots__ = ("counts", "total")
1559 +
1560 +     def ratio_for(self, status_code: int) -> float:
1561 +         """Calculate the ratio of responses with the given status code."""
1562 +         if self.total == 0:
1563 +             return 0.0
1564 +         return self.counts.get(status_code, 0) / self.total
1565 +
1566 +     def _get_4xx_breakdown(self) -> tuple[int, int, int]:
1567 +         """Get breakdown of 4xx responses: (404_count, other_4xx_count, total_4xx_count)."""
1568 +         count_404 = self.counts.get(404, 0)
1569 +         count_other_4xx = sum(
1570 +             count for code, count in self.counts.items() if 400 <= code < 500 and code not in {401, 403, 404}
1571 +         )
1572 +         total_4xx = count_404 + count_other_4xx
1573 +         return count_404, count_other_4xx, total_4xx
1574 +
1575 +     def _is_only_4xx_responses(self) -> bool:
1576 +         """Check if all responses are 4xx (excluding 5xx)."""
1577 +         return all(400 <= code < 500 for code in self.counts.keys() if code not in {500})
1578 +
1579 +     def _can_warn_about_4xx(self) -> bool:
1580 +         """Check basic conditions for 4xx warnings."""
1581 +         if self.total == 0:
1582 +             return False
1583 +         # Skip if only auth errors
1584 +         if set(self.counts.keys()) <= {401, 403, 500}:
1585 +             return False
1586 +         return self._is_only_4xx_responses()
1587 +
1588 +     def should_warn_about_missing_test_data(self) -> bool:
1589 +         """Check if an operation should be warned about missing test data (significant 404 responses)."""
1590 +         if not self._can_warn_about_4xx():
1591 +             return False
1592 +
1593 +         count_404, _, total_4xx = self._get_4xx_breakdown()
1594 +
1595 +         if total_4xx == 0:
1596 +             return False
1597 +
1598 +         return (count_404 / total_4xx) >= OTHER_CLIENT_ERRORS_THRESHOLD
1599 +
1600 +     def should_warn_about_validation_mismatch(self) -> bool:
1601 +         """Check if an operation should be warned about validation mismatch (significant 400/422 responses)."""
1602 +         if not self._can_warn_about_4xx():
1603 +             return False
1604 +
1605 +         _, count_other_4xx, total_4xx = self._get_4xx_breakdown()
1606 +
1607 +         if total_4xx == 0:
1608 +             return False
1609 +
1610 +         return (count_other_4xx / total_4xx) >= OTHER_CLIENT_ERRORS_THRESHOLD
1611 +
1612 +
1613 + AUTH_ERRORS_THRESHOLD = 0.9
1614 + OTHER_CLIENT_ERRORS_THRESHOLD = 0.1
1615 +
1616 +
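Note: to make the 4xx thresholds concrete, here is a worked example of the arithmetic the predicates above perform on a sample status-code distribution (plain Python, no Schemathesis imports; the counts are made up):

```python
# Suppose an operation answered 50 generated requests as follows:
counts = {404: 40, 422: 8, 401: 2}

# Mirror of _get_4xx_breakdown: 401/403 are excluded from the 4xx base
total_4xx = sum(n for code, n in counts.items() if 400 <= code < 500 and code not in {401, 403})
ratio_404 = counts.get(404, 0) / total_4xx    # 40 / 48 ≈ 0.83
ratio_other = counts.get(422, 0) / total_4xx  # 8 / 48 ≈ 0.17

# With OTHER_CLIENT_ERRORS_THRESHOLD = 0.1, both ratios clear the bar, so this
# operation is a candidate for both the "missing test data" and the
# "validation mismatch" warnings (subject to the other checks above).
assert ratio_404 >= 0.1 and ratio_other >= 0.1
```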
1617 + def aggregate_status_codes(interactions: Iterable[Interaction]) -> StatusCodeStatistic:
1618 +     """Analyze status codes from interactions."""
1619 +     counts: dict[int, int] = {}
1620 +     total = 0
1621 +
1622 +     for interaction in interactions:
1623 +         if interaction.response is not None:
1624 +             status = interaction.response.status_code
1625 +             counts[status] = counts.get(status, 0) + 1
1626 +             total += 1
1627 +
1628 +     return StatusCodeStatistic(counts=counts, total=total)
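Note: `aggregate_status_codes` only counts interactions that actually received a response, so requests without one (for example, those that never completed) do not skew the ratios. A self-contained sketch of the same aggregation, using an illustrative stand-in for the recorder's interaction objects:

```python
from __future__ import annotations

from dataclasses import dataclass
from typing import Iterable


@dataclass
class FakeResponse:
    status_code: int


@dataclass
class FakeInteraction:
    """Illustrative stand-in: a request that may or may not have received a response."""

    response: FakeResponse | None


def count_status_codes(interactions: Iterable[FakeInteraction]) -> tuple[dict[int, int], int]:
    """Mirror of the aggregation above: count responses per status code, skipping missing responses."""
    counts: dict[int, int] = {}
    total = 0
    for interaction in interactions:
        if interaction.response is not None:
            counts[interaction.response.status_code] = counts.get(interaction.response.status_code, 0) + 1
            total += 1
    return counts, total


interactions = [
    FakeInteraction(FakeResponse(404)),
    FakeInteraction(FakeResponse(404)),
    FakeInteraction(FakeResponse(422)),
    FakeInteraction(None),  # no response received; not counted
]
counts, total = count_status_codes(interactions)
assert counts == {404: 2, 422: 1} and total == 3
```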