langwatch-scenario 0.1.3__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {langwatch_scenario-0.1.3.dist-info → langwatch_scenario-0.3.0.dist-info}/METADATA +95 -34
- langwatch_scenario-0.3.0.dist-info/RECORD +16 -0
- {langwatch_scenario-0.1.3.dist-info → langwatch_scenario-0.3.0.dist-info}/WHEEL +1 -1
- scenario/__init__.py +13 -3
- scenario/config.py +18 -7
- scenario/error_messages.py +81 -23
- scenario/pytest_plugin.py +8 -8
- scenario/scenario.py +144 -26
- scenario/scenario_agent_adapter.py +16 -0
- scenario/scenario_executor.py +405 -143
- scenario/testing_agent.py +123 -109
- scenario/types.py +96 -0
- scenario/utils.py +148 -5
- langwatch_scenario-0.1.3.dist-info/RECORD +0 -15
- scenario/result.py +0 -81
- {langwatch_scenario-0.1.3.dist-info → langwatch_scenario-0.3.0.dist-info}/entry_points.txt +0 -0
- {langwatch_scenario-0.1.3.dist-info → langwatch_scenario-0.3.0.dist-info}/top_level.txt +0 -0
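The headline change in this release is the agent interface: 0.1.3 passed a plain callback function to `Scenario`, while 0.3.0 wraps the agent in a `ScenarioAgentAdapter` subclass with an async `call` method. A migration sketch assembled from the before/after snippets in the METADATA diff below (`VegetarianRecipeAgent` and the `agent` variable stand in for your own agent code):

```python
from scenario import ScenarioAgentAdapter, AgentInput, AgentReturnTypes

# 0.1.3 style: a plain function handed to Scenario(agent=...)
def vegetarian_recipe_agent(message, context):
    return agent.run(message)  # `agent` is your own object, as in the old README

# 0.3.0 style: a ScenarioAgentAdapter subclass handed to Scenario(agent=...)
class VegetarianRecipeAgentAdapter(ScenarioAgentAdapter):
    def __init__(self, input: AgentInput):
        self.agent = VegetarianRecipeAgent()  # your own agent class

    async def call(self, input: AgentInput) -> AgentReturnTypes:
        return self.agent.run(input.last_new_user_message_str())
```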
{langwatch_scenario-0.1.3.dist-info → langwatch_scenario-0.3.0.dist-info}/METADATA CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: langwatch-scenario
-Version: 0.1.3
+Version: 0.3.0
 Summary: The end-to-end agent testing library
 Author-email: LangWatch Team <support@langwatch.ai>
 License: MIT
@@ -25,11 +25,13 @@ Requires-Dist: joblib>=1.4.2
 Requires-Dist: wrapt>=1.17.2
 Requires-Dist: pytest-asyncio>=0.26.0
 Requires-Dist: rich<15.0.0,>=13.3.3
+Requires-Dist: pksuid>=1.1.2
 Provides-Extra: dev
 Requires-Dist: black; extra == "dev"
 Requires-Dist: isort; extra == "dev"
-Requires-Dist: mypy; extra == "dev"
 Requires-Dist: pytest-cov; extra == "dev"
+Requires-Dist: pre-commit; extra == "dev"
+Requires-Dist: commitizen; extra == "dev"
 
 
 
@@ -39,12 +41,17 @@ Requires-Dist: pytest-cov; extra == "dev"
 
 # Scenario: Use an Agent to test your Agent
 
-Scenario is
+Scenario is an Agent Testing Framework for testing AI agents through Simulation Testing.
 
-You define the scenarios, and the testing agent will simulate
+You define the scenarios, and the testing agent will simulate a real user as it follows them, it will keep chatting back and forth with _your_ agent to play out the simulation, until it reaches the desired goal or detects an unexpected behavior based on the criteria you defined.
 
 [📺 Video Tutorial](https://www.youtube.com/watch?v=f8NLpkY0Av4)
 
+### See also
+
+- [Scenario TypeScript](https://github.com/langwatch/scenario-ts/)
+- [Scenario Go](https://github.com/langwatch/scenario-go/)
+
 ## Getting Started
 
 Install pytest and scenario:
@@ -58,32 +65,40 @@ Now create your first scenario and save it as `tests/test_vegetarian_recipe_agen
 ```python
 import pytest
 
-from scenario import Scenario, TestingAgent, scenario_cache
+from scenario import Scenario, TestingAgent, ScenarioAgentAdapter, AgentInput, AgentReturnTypes, scenario_cache
 
 Scenario.configure(testing_agent=TestingAgent(model="openai/gpt-4o-mini"))
 
 
+# Create an adapter to call your agent
+class VegetarianRecipeAgentAdapter(ScenarioAgentAdapter):
+    def __init__(self, input: AgentInput):
+        self.agent = VegetarianRecipeAgent()
+
+    async def call(self, input: AgentInput) -> AgentReturnTypes:
+        return self.agent.run(input.last_new_user_message_str())
+
+
 @pytest.mark.agent_test
 @pytest.mark.asyncio
 async def test_vegetarian_recipe_agent():
-
-
-    def vegetarian_recipe_agent(message, context):
-        # Call your agent here
-        return agent.run(message)
-
-    # Define the scenario
+    # Define the simulated scenario
     scenario = Scenario(
-        "
+        name="dinner idea",
+        description="""
+            It's saturday evening, the user is very hungry and tired,
+            but have no money to order out, so they are looking for a recipe.
+
+            The user never mentions they want a vegetarian recipe.
+        """,
         agent=vegetarian_recipe_agent,
-
-
-        "
-        "
-
-
-        "
-        "The agent asks more than two follow-up questions",
+        # List the evaluation criteria for the scenario to be considered successful
+        criteria=[
+            "Agent should not ask more than two follow-up questions",
+            "Agent should generate a recipe",
+            "Recipe should include a list of ingredients",
+            "Recipe should include step-by-step cooking instructions",
+            "Recipe should be vegetarian and not include any sort of meat",
+        ],
     )
 
@@ -111,9 +126,11 @@ class VegetarianRecipeAgent:
             messages=[
                 {
                     "role": "system",
-                    "content": """
-
-
+                    "content": """
+                        You are a vegetarian recipe agent.
+                        Given the user request, ask AT MOST ONE follow-up question,
+                        then provide a complete recipe. Keep your responses concise and focused.
+                    """,
                 },
                 *self.history,
             ],
@@ -121,7 +138,7 @@ class VegetarianRecipeAgent:
         message = response.choices[0].message  # type: ignore
         self.history.append(message)
 
-        return
+        return [message]
 
 ```
 
@@ -151,19 +168,20 @@ For example, in this Lovable Clone scenario test:
 
 ```python
 scenario = Scenario(
-    "
+    name="dog walking startup landing page",
+    description="""
+        the user wants to create a new landing page for their dog walking startup
+
+        send the first message to generate the landing page, then a single follow up request to extend it, then give your final verdict
+    """,
     agent=lovable_agent,
-
-    success_criteria=[
+    criteria=[
         "agent reads the files before go and making changes",
-        "agent modified the index.css file",
-        "agent modified the Index.tsx file",
+        "agent modified the index.css file, not only the Index.tsx file",
        "agent created a comprehensive landing page",
        "agent extended the landing page with a new section",
-
-
-        "agent says it can't read the file",
-        "agent produces incomplete code or is too lazy to finish",
+        "agent should NOT say it can't read the file",
+        "agent should NOT produce incomplete code or be too lazy to finish",
     ],
     max_turns=5,
 )
@@ -173,6 +191,49 @@ result = await scenario.run()
 
 You can find a fully working Lovable Clone example in [examples/test_lovable_clone.py](examples/test_lovable_clone.py).
 
+## Specify a script for guiding the scenario
+
+You can specify a script for guiding the scenario by passing a list of steps to the `script` field.
+
+```python
+@pytest.mark.agent_test
+@pytest.mark.asyncio
+async def test_ai_assistant_agent():
+    scenario = Scenario(
+        name="false assumptions",
+        description="""
+            The agent makes false assumption about being an ATM bank, and user corrects it
+        """,
+        agent=AiAssistantAgentAdapter,
+        criteria=[
+            "user should get good recommendations on river crossing",
+            "agent should NOT follow up about ATM recommendation after user has corrected them they are just hiking",
+        ],
+        max_turns=5,
+    )
+
+    def check_if_tool_was_called(state: ScenarioExecutor) -> None:
+        assert state.has_tool_call("web_search")
+
+    result = await scenario.script(
+        [
+            # Define existing history of messages
+            scenario.user("how do I safely approach a bank?"),
+            # Or let it be generate automatically
+            scenario.agent(),
+            # Add custom assertions, for example making sure a tool was called
+            check_if_tool_was_called,
+            scenario.user(),
+            # Let the simulation proceed for 2 more turns
+            scenario.proceed(turns=2),
+            # Time to make a judgment call
+            scenario.judge(),
+        ]
+    ).run()
+
+    assert result.success
+```
+
 ## Debug mode
 
 You can enable debug mode by setting the `debug` field to `True` in the `Scenario.configure` method or in the specific scenario you are running, or by passing the `--debug` flag to pytest.
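As a concrete reading of that paragraph, enabling debug mode globally would look like this (a sketch reusing the `Scenario.configure` call from the README above):

```python
from scenario import Scenario, TestingAgent

Scenario.configure(
    testing_agent=TestingAgent(model="openai/gpt-4o-mini"),
    debug=True,  # same effect as running pytest with --debug
)
```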
langwatch_scenario-0.3.0.dist-info/RECORD ADDED

@@ -0,0 +1,16 @@
+scenario/__init__.py,sha256=0OavO4hoZMFL6frlplNkR7BSHfGSOhuVtmKmTrOMFEs,844
+scenario/cache.py,sha256=sYu16SAf-BnVYkWSlEDzpyynJGIQyNYsgMXPgCqEnmk,1719
+scenario/config.py,sha256=NiCCmr8flds-VDzvF8ps4SChVTARtcWfEoHhK0UkDMQ,1076
+scenario/error_messages.py,sha256=8_pa3HIaqkw08qOqeiRKDCNykr9jtofpNJoEV03aRWc,4690
+scenario/pytest_plugin.py,sha256=oJtEPVPi5x50Z-UawVyVPNd6buvh_4msSZ-3hLFpw_Y,5770
+scenario/scenario.py,sha256=K4Snu4-pJaoprEFyly7ZQT8qNlAamxt-eXibCJ0EIJU,7332
+scenario/scenario_agent_adapter.py,sha256=Y2dP3z-2jLYCssQ20oHOphwwrRPQNo2HmLD2KBcJRu0,427
+scenario/scenario_executor.py,sha256=geaP3Znd1he66L6ku3l2IAODj68TtAIk8b8Ssy494xA,15681
+scenario/testing_agent.py,sha256=5S2PIl2hi9FBSVjjs9afXhEgiogryjBIyffH5iJBwdo,10676
+scenario/types.py,sha256=-Uz0qg_fY5vAEkrZnM5CMqE5hiP8OtNErpDdHJmHtac,3179
+scenario/utils.py,sha256=bx813RpZO3xyPfD-dTBbeLM9umWm3PGOq9pw48aJoHI,8113
+langwatch_scenario-0.3.0.dist-info/METADATA,sha256=pywrVOVE2eE4Zk5wePzJoEfErNXWvgK-C8G-qfWp7EI,11040
+langwatch_scenario-0.3.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+langwatch_scenario-0.3.0.dist-info/entry_points.txt,sha256=WlEnJ_gku0i18bIa3DSuGqXRX-QDQLe_s0YmRzK45TI,45
+langwatch_scenario-0.3.0.dist-info/top_level.txt,sha256=45Mn28aedJsetnBMB5xSmrJ-yo701QLH89Zlz4r1clE,9
+langwatch_scenario-0.3.0.dist-info/RECORD,,
scenario/__init__.py CHANGED
@@ -3,10 +3,11 @@ Scenario: A testing library for conversational agents.
 """
 
 # First import non-dependent modules
-from .
+from .types import ScenarioResult, AgentInput, ScenarioAgentRole, AgentReturnTypes
 from .config import ScenarioConfig
 
 # Then import modules with dependencies
+from .scenario_agent_adapter import ScenarioAgentAdapter
 from .testing_agent import TestingAgent
 from .scenario import Scenario
 from .cache import scenario_cache
@@ -15,10 +16,19 @@ from .cache import scenario_cache
 from .pytest_plugin import pytest_configure, scenario_reporter
 
 __all__ = [
-
-    "TestingAgent",
+    # Types
     "ScenarioResult",
+    "AgentInput",
+    "ScenarioAgentRole",
     "ScenarioConfig",
+    "AgentReturnTypes",
+
+    # Classes
+    "Scenario",
+    "ScenarioAgentAdapter",
+    "TestingAgent",
+
+    # Plugins
     "pytest_configure",
     "scenario_reporter",
     "scenario_cache",
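Read from the consumer side, the reorganized `__all__` gives this import surface (names copied from the hunk above):

```python
from scenario import (
    # Types
    ScenarioResult, AgentInput, ScenarioAgentRole, ScenarioConfig, AgentReturnTypes,
    # Classes
    Scenario, ScenarioAgentAdapter, TestingAgent,
    # Plugins
    pytest_configure, scenario_reporter, scenario_cache,
)
```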
scenario/config.py CHANGED
@@ -2,10 +2,16 @@
 Configuration module for Scenario.
 """
 
-from typing import Optional, Union
+from typing import TYPE_CHECKING, Any, Optional, Type, Union
 from pydantic import BaseModel
 
-
+if TYPE_CHECKING:
+    from scenario.scenario_agent_adapter import ScenarioAgentAdapter
+
+    ScenarioAgentType = ScenarioAgentAdapter
+else:
+    ScenarioAgentType = Any
+
 
 class ScenarioConfig(BaseModel):
     """
@@ -15,14 +21,19 @@ class ScenarioConfig(BaseModel):
     such as the LLM provider and model to use for the testing agent.
     """
 
-    testing_agent: Optional[
+    testing_agent: Optional[Type[ScenarioAgentType]] = None
     max_turns: Optional[int] = 10
     verbose: Optional[Union[bool, int]] = True
     cache_key: Optional[str] = None
     debug: Optional[bool] = False
 
     def merge(self, other: "ScenarioConfig") -> "ScenarioConfig":
-        return ScenarioConfig(
-            **
-
-
+        return ScenarioConfig(
+            **{
+                **self.items(),
+                **other.items(),
+            }
+        )
+
+    def items(self):
+        return {k: getattr(self, k) for k in self.model_dump(exclude_none=True).keys()}
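One subtlety in the new `merge`: `items()` filters through `model_dump(exclude_none=True)`, so a field that is `None` on `other` does not clobber the value on `self`, while any non-`None` field (including non-`None` defaults such as `max_turns=10`) does. A sketch of that behavior under this reading, not taken from the package's tests:

```python
from scenario import ScenarioConfig

base = ScenarioConfig(cache_key="global-cache-key")
override = ScenarioConfig(max_turns=5)

merged = base.merge(override)
assert merged.max_turns == 5                   # non-None on `override`, so it wins
assert merged.cache_key == "global-cache-key"  # None on `override` is dropped, `base` wins
```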
scenario/error_messages.py CHANGED
@@ -36,41 +36,99 @@ default_config_error_message = f"""
     result = scenario.run()
 
     assert result.success
-
+"""
 
 
-
-
+testing_agent_not_configured_error_message = f"""
+
+{termcolor.colored("->", "cyan")} Testing agent was initialized without a model, please set the model when defining the testing agent, for example:
+
+    TestingAgent.with_config(model="openai/gpt-4.1-mini")
+    {termcolor.colored("^" * 53, "green")}
+"""
+
+
+def message_return_error_message(got: Any, class_name: str):
+    got_ = repr(got)
     if len(got_) > 100:
         got_ = got_[:100] + "..."
 
     return f"""
-{termcolor.colored("->", "cyan")}
+{termcolor.colored("->", "cyan")} On the {termcolor.colored("call", "green")} method of the {class_name} agent adapter, you returned:
 
 {indent(got_, ' ' * 4)}
 
-{termcolor.colored("->", "cyan")} But
+{termcolor.colored("->", "cyan")} But the adapter should return either a string, a dict on the OpenAI messages format, or a list of messages in the OpenAI messages format so the testing agent can understand what happened. For example:
+
+    class MyAgentAdapter(ScenarioAgentAdapter):
+        async def call(self, input: AgentInput) -> AgentReturnTypes:
+            response = call_my_agent(message)
+
+            return response.output_text
+            {termcolor.colored("^" * 27, "green")}
+
+{termcolor.colored("->", "cyan")} Alternatively, you can return a list of messages in OpenAI messages format, this is useful for capturing tool calls and other before the final response:
+
+    class MyAgentAdapter(ScenarioAgentAdapter):
+        async def call(self, input: AgentInput) -> AgentReturnTypes:
+            response = call_my_agent(message)
+
+            return [
+                {{"role": "assistant", "content": response.output_text}},
+                {termcolor.colored("^" * 55, "green")}
+            ]
+"""
+
+
+def message_invalid_agent_type(got: Any):
+    got_ = repr(got)
+    if len(got_) > 100:
+        got_ = got_[:100] + "..."
+
+    return f"""
+{termcolor.colored("->", "cyan")} The {termcolor.colored("agent", "green")} argument of Scenario needs to receive a class that inherits from {termcolor.colored("ScenarioAgentAdapter", "green")}, but you passed:
+
+{indent(got_, ' ' * 4)}
 
-
-            response = call_my_agent(message)
+{termcolor.colored("->", "cyan")} Instead, wrap your agent in a ScenarioAgentAdapter subclass. For example:
 
-
-
-
-
+    class MyAgentAdapter(ScenarioAgentAdapter):
+    {termcolor.colored("^" * 43, "green")}
+        async def call(self, input: AgentInput) -> AgentReturnTypes:
+            response = call_my_agent(message)
 
-
+            return response.output_text
 
-
-            response = call_my_agent(message)
+{termcolor.colored("->", "cyan")} And then you can use that on your scenario definition:
 
-
-
-
-
+    @pytest.mark.agent_test
+    def test_my_agent():
+        scenario = Scenario(
+            name="first scenario",
+            description=\"\"\"
+                Example scenario description to test your agent.
+            \"\"\",
+            agent=MyAgentAdapter,
+            {termcolor.colored("^" * 20, "green")}
+            criteria=[
+                "Requirement One",
+                "Requirement Two",
             ],
-
-
-
-
-
+        )
+        result = scenario.run()
+
+        assert result.success
+"""
+
+
+def agent_response_not_awaitable(class_name: str):
+    return f"""
+{termcolor.colored("->", "cyan")} The {termcolor.colored("call", "green")} method of the {class_name} agent adapter returned a non-awaitable response, you probably forgot to add the {termcolor.colored("async", "green")} keyword to the method definition, make sure your code looks like this:
+
+    class {class_name}(ScenarioAgentAdapter):
+        async def call(self, input: AgentInput) -> AgentReturnTypes:
+        {termcolor.colored("^" * 5, "green")}
+            response = call_my_agent(message)
+
+            return response.output_text
+"""
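Taken together, these messages pin down the adapter contract: `call` must be `async` and may return a plain string, a single message dict, or a list of OpenAI-format messages. A runnable sketch of the two shapes the messages describe (`my_agent_reply` is a stand-in for your own code):

```python
from scenario import ScenarioAgentAdapter, AgentInput, AgentReturnTypes


def my_agent_reply(message: str) -> str:
    return f"You said: {message}"  # stand-in for a real agent call


class StringAdapter(ScenarioAgentAdapter):
    async def call(self, input: AgentInput) -> AgentReturnTypes:
        # Simplest shape: just the final text.
        return my_agent_reply(input.last_new_user_message_str())


class MessagesAdapter(ScenarioAgentAdapter):
    async def call(self, input: AgentInput) -> AgentReturnTypes:
        # Richer shape: OpenAI-format messages, useful for exposing
        # intermediate tool calls to the testing agent.
        return [
            {"role": "assistant", "content": my_agent_reply(input.last_new_user_message_str())}
        ]
```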
scenario/pytest_plugin.py CHANGED
@@ -7,7 +7,7 @@ from typing import TypedDict
 import functools
 from termcolor import colored
 
-from scenario.
+from scenario.types import ScenarioResult
 
 from .scenario import Scenario
 
@@ -82,7 +82,7 @@ class ScenarioReporter:
             time = f" in {result.total_time:.2f}s (agent: {result.agent_time:.2f}s)"
 
         print(
-            f"\n{idx}. {scenario.
+            f"\n{idx}. {scenario.name} - {colored(status, status_color, attrs=['bold'])}{time}"
         )
 
         print(
@@ -92,23 +92,23 @@ class ScenarioReporter:
             )
         )
 
-        if hasattr(result, "
-            criteria_count = len(result.
-            total_criteria = len(scenario.
+        if hasattr(result, "passed_criteria") and result.passed_criteria:
+            criteria_count = len(result.passed_criteria)
+            total_criteria = len(scenario.criteria)
             criteria_color = (
                 "green" if criteria_count == total_criteria else "yellow"
             )
             print(
                 colored(
-                    f"
+                    f"   Passed Criteria: {criteria_count}/{total_criteria}",
                     criteria_color,
                 )
             )
 
-        if hasattr(result, "
+        if hasattr(result, "failed_criteria") and result.failed_criteria:
             print(
                 colored(
-                    f"
+                    f"   Failed Criteria: {len(result.failed_criteria)}",
                     "red",
                 )
             )