langwatch-scenario 0.2.0__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: langwatch-scenario
- Version: 0.2.0
+ Version: 0.3.0
  Summary: The end-to-end agent testing library
  Author-email: LangWatch Team <support@langwatch.ai>
  License: MIT
@@ -25,11 +25,13 @@ Requires-Dist: joblib>=1.4.2
  Requires-Dist: wrapt>=1.17.2
  Requires-Dist: pytest-asyncio>=0.26.0
  Requires-Dist: rich<15.0.0,>=13.3.3
+ Requires-Dist: pksuid>=1.1.2
  Provides-Extra: dev
  Requires-Dist: black; extra == "dev"
  Requires-Dist: isort; extra == "dev"
- Requires-Dist: mypy; extra == "dev"
  Requires-Dist: pytest-cov; extra == "dev"
+ Requires-Dist: pre-commit; extra == "dev"
+ Requires-Dist: commitizen; extra == "dev"

  ![scenario](https://github.com/langwatch/scenario/raw/main/assets/scenario-wide.webp)

@@ -39,9 +41,9 @@ Requires-Dist: pytest-cov; extra == "dev"

  # Scenario: Use an Agent to test your Agent

- Scenario is a library for testing agents end-to-end as a human would, but without having to manually do it. The automated testing agent covers every single scenario for you.
+ Scenario is an Agent Testing Framework for testing AI agents through Simulation Testing.

- You define the scenarios, and the testing agent will simulate your users as it follows them, it will keep chatting and evaluating your agent until it reaches the desired goal or detects an unexpected behavior.
+ You define the scenarios, and the testing agent will simulate a real user as it follows them. It will keep chatting back and forth with _your_ agent to play out the simulation until it reaches the desired goal or detects an unexpected behavior, based on the criteria you defined.

  [📺 Video Tutorial](https://www.youtube.com/watch?v=f8NLpkY0Av4)

@@ -63,20 +65,23 @@ Now create your first scenario and save it as `tests/test_vegetarian_recipe_agen
  ```python
  import pytest

- from scenario import Scenario, TestingAgent, scenario_cache
+ from scenario import Scenario, TestingAgent, ScenarioAgentAdapter, AgentInput, AgentReturnTypes, scenario_cache

  Scenario.configure(testing_agent=TestingAgent(model="openai/gpt-4o-mini"))


+ # Create an adapter to call your agent
+ class VegetarianRecipeAgentAdapter(ScenarioAgentAdapter):
+     def __init__(self, input: AgentInput):
+         self.agent = VegetarianRecipeAgent()
+
+     async def call(self, input: AgentInput) -> AgentReturnTypes:
+         return self.agent.run(input.last_new_user_message_str())
+
+
  @pytest.mark.agent_test
  @pytest.mark.asyncio
  async def test_vegetarian_recipe_agent():
-     agent = VegetarianRecipeAgent()
-
-     def vegetarian_recipe_agent(message, context):
-         # Call your agent here
-         return agent.run(message)
-
      # Define the simulated scenario
      scenario = Scenario(
          name="dinner idea",
@@ -133,7 +138,7 @@ class VegetarianRecipeAgent:
          message = response.choices[0].message # type: ignore
          self.history.append(message)

-         return {"messages": [message]}
+         return [message]

  ```

@@ -186,6 +191,49 @@ result = await scenario.run()

  You can find a fully working Lovable Clone example in [examples/test_lovable_clone.py](examples/test_lovable_clone.py).

+ ## Specify a script for guiding the scenario
+
+ You can specify a script for guiding the scenario by passing a list of steps to the `script` method.
+
+ ```python
+ @pytest.mark.agent_test
+ @pytest.mark.asyncio
+ async def test_ai_assistant_agent():
+     scenario = Scenario(
+         name="false assumptions",
+         description="""
+             The agent makes false assumption about being an ATM bank, and user corrects it
+         """,
+         agent=AiAssistantAgentAdapter,
+         criteria=[
+             "user should get good recommendations on river crossing",
+             "agent should NOT follow up about ATM recommendation after user has corrected them they are just hiking",
+         ],
+         max_turns=5,
+     )
+
+     def check_if_tool_was_called(state: ScenarioExecutor) -> None:
+         assert state.has_tool_call("web_search")
+
+     result = await scenario.script(
+         [
+             # Define existing history of messages
+             scenario.user("how do I safely approach a bank?"),
+             # Or let it be generated automatically
+             scenario.agent(),
+             # Add custom assertions, for example making sure a tool was called
+             check_if_tool_was_called,
+             scenario.user(),
+             # Let the simulation proceed for 2 more turns
+             scenario.proceed(turns=2),
+             # Time to make a judgment call
+             scenario.judge(),
+         ]
+     ).run()
+
+     assert result.success
+ ```
+
  ## Debug mode

  You can enable debug mode by setting the `debug` field to `True` in the `Scenario.configure` method or in the specific scenario you are running, or by passing the `--debug` flag to pytest.
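
For quick reference, here is a minimal sketch of the options named in that paragraph, based only on the fields visible in this diff; the scenario values and adapter name are placeholders borrowed from the quickstart above, not prescribed by this release.

```python
from scenario import Scenario

# Per scenario: pass debug=True alongside the usual fields
scenario = Scenario(
    name="dinner idea",
    description="User is looking for a dinner idea",  # placeholder description
    agent=VegetarianRecipeAgentAdapter,               # adapter from the quickstart
    criteria=["Recipe should be vegetarian"],         # placeholder criterion
    debug=True,
)

# Globally instead: Scenario.configure(..., debug=True)
# Or from the command line: pytest --debug
```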
@@ -0,0 +1,16 @@
+ scenario/__init__.py,sha256=0OavO4hoZMFL6frlplNkR7BSHfGSOhuVtmKmTrOMFEs,844
+ scenario/cache.py,sha256=sYu16SAf-BnVYkWSlEDzpyynJGIQyNYsgMXPgCqEnmk,1719
+ scenario/config.py,sha256=NiCCmr8flds-VDzvF8ps4SChVTARtcWfEoHhK0UkDMQ,1076
+ scenario/error_messages.py,sha256=8_pa3HIaqkw08qOqeiRKDCNykr9jtofpNJoEV03aRWc,4690
+ scenario/pytest_plugin.py,sha256=oJtEPVPi5x50Z-UawVyVPNd6buvh_4msSZ-3hLFpw_Y,5770
+ scenario/scenario.py,sha256=K4Snu4-pJaoprEFyly7ZQT8qNlAamxt-eXibCJ0EIJU,7332
+ scenario/scenario_agent_adapter.py,sha256=Y2dP3z-2jLYCssQ20oHOphwwrRPQNo2HmLD2KBcJRu0,427
+ scenario/scenario_executor.py,sha256=geaP3Znd1he66L6ku3l2IAODj68TtAIk8b8Ssy494xA,15681
+ scenario/testing_agent.py,sha256=5S2PIl2hi9FBSVjjs9afXhEgiogryjBIyffH5iJBwdo,10676
+ scenario/types.py,sha256=-Uz0qg_fY5vAEkrZnM5CMqE5hiP8OtNErpDdHJmHtac,3179
+ scenario/utils.py,sha256=bx813RpZO3xyPfD-dTBbeLM9umWm3PGOq9pw48aJoHI,8113
+ langwatch_scenario-0.3.0.dist-info/METADATA,sha256=pywrVOVE2eE4Zk5wePzJoEfErNXWvgK-C8G-qfWp7EI,11040
+ langwatch_scenario-0.3.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ langwatch_scenario-0.3.0.dist-info/entry_points.txt,sha256=WlEnJ_gku0i18bIa3DSuGqXRX-QDQLe_s0YmRzK45TI,45
+ langwatch_scenario-0.3.0.dist-info/top_level.txt,sha256=45Mn28aedJsetnBMB5xSmrJ-yo701QLH89Zlz4r1clE,9
+ langwatch_scenario-0.3.0.dist-info/RECORD,,
scenario/__init__.py CHANGED
@@ -3,10 +3,11 @@ Scenario: A testing library for conversational agents.
  """

  # First import non-dependent modules
- from .result import ScenarioResult
+ from .types import ScenarioResult, AgentInput, ScenarioAgentRole, AgentReturnTypes
  from .config import ScenarioConfig

  # Then import modules with dependencies
+ from .scenario_agent_adapter import ScenarioAgentAdapter
  from .testing_agent import TestingAgent
  from .scenario import Scenario
  from .cache import scenario_cache
@@ -15,10 +16,19 @@ from .cache import scenario_cache
  from .pytest_plugin import pytest_configure, scenario_reporter

  __all__ = [
-     "Scenario",
-     "TestingAgent",
+     # Types
      "ScenarioResult",
+     "AgentInput",
+     "ScenarioAgentRole",
      "ScenarioConfig",
+     "AgentReturnTypes",
+
+     # Classes
+     "Scenario",
+     "ScenarioAgentAdapter",
+     "TestingAgent",
+
+     # Plugins
      "pytest_configure",
      "scenario_reporter",
      "scenario_cache",
scenario/config.py CHANGED
@@ -2,10 +2,16 @@
  Configuration module for Scenario.
  """

- from typing import Optional, Union
+ from typing import TYPE_CHECKING, Any, Optional, Type, Union
  from pydantic import BaseModel

- from scenario.testing_agent import TestingAgent
+ if TYPE_CHECKING:
+     from scenario.scenario_agent_adapter import ScenarioAgentAdapter
+
+     ScenarioAgentType = ScenarioAgentAdapter
+ else:
+     ScenarioAgentType = Any
+

  class ScenarioConfig(BaseModel):
      """
@@ -15,14 +21,19 @@ class ScenarioConfig(BaseModel):
      such as the LLM provider and model to use for the testing agent.
      """

-     testing_agent: Optional[TestingAgent] = None
+     testing_agent: Optional[Type[ScenarioAgentType]] = None
      max_turns: Optional[int] = 10
      verbose: Optional[Union[bool, int]] = True
      cache_key: Optional[str] = None
      debug: Optional[bool] = False

      def merge(self, other: "ScenarioConfig") -> "ScenarioConfig":
-         return ScenarioConfig(**{
-             **self.model_dump(),
-             **other.model_dump(exclude_none=True),
-         })
+         return ScenarioConfig(
+             **{
+                 **self.items(),
+                 **other.items(),
+             }
+         )
+
+     def items(self):
+         return {k: getattr(self, k) for k in self.model_dump(exclude_none=True).keys()}
scenario/error_messages.py CHANGED
@@ -36,41 +36,99 @@ default_config_error_message = f"""
  result = scenario.run()

  assert result.success
- """
+ """


- def message_return_error_message(got: Any):
- got_ = got.__repr__()
+ testing_agent_not_configured_error_message = f"""
+
+ {termcolor.colored("->", "cyan")} Testing agent was initialized without a model, please set the model when defining the testing agent, for example:
+
+ TestingAgent.with_config(model="openai/gpt-4.1-mini")
+ {termcolor.colored("^" * 53, "green")}
+ """
+
+
+ def message_return_error_message(got: Any, class_name: str):
+ got_ = repr(got)
  if len(got_) > 100:
  got_ = got_[:100] + "..."

  return f"""
- {termcolor.colored("->", "cyan")} Your agent returned:
+ {termcolor.colored("->", "cyan")} On the {termcolor.colored("call", "green")} method of the {class_name} agent adapter, you returned:

  {indent(got_, ' ' * 4)}

- {termcolor.colored("->", "cyan")} But your agent should return a dict with either a "message" string key or a "messages" key in OpenAI messages format so the testing agent can understand what happened. For example:
+ {termcolor.colored("->", "cyan")} But the adapter should return either a string, a dict on the OpenAI messages format, or a list of messages in the OpenAI messages format so the testing agent can understand what happened. For example:
+
+ class MyAgentAdapter(ScenarioAgentAdapter):
+ async def call(self, input: AgentInput) -> AgentReturnTypes:
+ response = call_my_agent(message)
+
+ return response.output_text
+ {termcolor.colored("^" * 27, "green")}
+
+ {termcolor.colored("->", "cyan")} Alternatively, you can return a list of messages in OpenAI messages format, this is useful for capturing tool calls and other messages before the final response:
+
+ class MyAgentAdapter(ScenarioAgentAdapter):
+ async def call(self, input: AgentInput) -> AgentReturnTypes:
+ response = call_my_agent(message)
+
+ return [
+ {{"role": "assistant", "content": response.output_text}},
+ {termcolor.colored("^" * 55, "green")}
+ ]
+ """
+
+
+ def message_invalid_agent_type(got: Any):
+ got_ = repr(got)
+ if len(got_) > 100:
+ got_ = got_[:100] + "..."
+
+ return f"""
+ {termcolor.colored("->", "cyan")} The {termcolor.colored("agent", "green")} argument of Scenario needs to receive a class that inherits from {termcolor.colored("ScenarioAgentAdapter", "green")}, but you passed:
+
+ {indent(got_, ' ' * 4)}

- def my_agent_under_test(message, context):
- response = call_my_agent(message)
+ {termcolor.colored("->", "cyan")} Instead, wrap your agent in a ScenarioAgentAdapter subclass. For example:

- return {{
- "message": response.output_text
- {termcolor.colored("^" * 31, "green")}
- }}
+ class MyAgentAdapter(ScenarioAgentAdapter):
+ {termcolor.colored("^" * 43, "green")}
+ async def call(self, input: AgentInput) -> AgentReturnTypes:
+ response = call_my_agent(message)

- {termcolor.colored("->", "cyan")} Alternatively, you can return a list of messages in OpenAI messages format, you can also optionally provide extra artifacts:
+ return response.output_text

- def my_agent_under_test(message, context):
- response = call_my_agent(message)
+ {termcolor.colored("->", "cyan")} And then you can use that on your scenario definition:

- return {{
- "messages": [
- {{"role": "assistant", "content": response}}
- {termcolor.colored("^" * 42, "green")}
+ @pytest.mark.agent_test
+ def test_my_agent():
+ scenario = Scenario(
+ name="first scenario",
+ description=\"\"\"
+ Example scenario description to test your agent.
+ \"\"\",
+ agent=MyAgentAdapter,
+ {termcolor.colored("^" * 20, "green")}
+ criteria=[
+ "Requirement One",
+ "Requirement Two",
  ],
- "extra": {{
- # ... optional extra artifacts
- }}
- }}
- """
+ )
+ result = scenario.run()
+
+ assert result.success
+ """
+
+
+ def agent_response_not_awaitable(class_name: str):
+ return f"""
+ {termcolor.colored("->", "cyan")} The {termcolor.colored("call", "green")} method of the {class_name} agent adapter returned a non-awaitable response, you probably forgot to add the {termcolor.colored("async", "green")} keyword to the method definition, make sure your code looks like this:
+
+ class {class_name}(ScenarioAgentAdapter):
+ async def call(self, input: AgentInput) -> AgentReturnTypes:
+ {termcolor.colored("^" * 5, "green")}
+ response = call_my_agent(message)
+
+ return response.output_text
+ """
scenario/pytest_plugin.py CHANGED
@@ -7,7 +7,7 @@ from typing import TypedDict
  import functools
  from termcolor import colored

- from scenario.result import ScenarioResult
+ from scenario.types import ScenarioResult

  from .scenario import Scenario

scenario/scenario.py CHANGED
@@ -2,16 +2,29 @@
  Scenario module: defines the core Scenario class for agent testing.
  """

- from typing import Awaitable, List, Dict, Any, Optional, Callable, TypedDict, Union
+ from typing import (
+     Awaitable,
+     Callable,
+     List,
+     Dict,
+     Any,
+     Optional,
+     Type,
+     TypedDict,
+     Union,
+ )
  import asyncio
  import concurrent.futures
- from functools import partial

  from scenario.config import ScenarioConfig
+ from scenario.error_messages import (
+     default_config_error_message,
+     message_invalid_agent_type,
+ )
+ from scenario.scenario_agent_adapter import ScenarioAgentAdapter
  from scenario.scenario_executor import ScenarioExecutor

- from .result import ScenarioResult
- from .testing_agent import TestingAgent
+ from .types import ScenarioResult, ScriptStep


  from openai.types.chat import ChatCompletionMessageParam
@@ -34,18 +47,38 @@ class Scenario(ScenarioConfig):

      name: str
      description: str
-     agent: Union[
-         Callable[[str, Optional[Dict[str, Any]]], Dict[str, Any]],
-         Callable[[str, Optional[Dict[str, Any]]], Awaitable[Dict[str, Any]]],
-     ]
+     agents: List[Type[ScenarioAgentAdapter]]
      criteria: List[str]

-     def __init__(self, name: str, description: str, **kwargs):
+     def __init__(
+         self,
+         name: str,
+         description: str,
+         criteria: List[str] = [],
+         agent: Optional[Type[ScenarioAgentAdapter]] = None,
+         testing_agent: Optional[Type[ScenarioAgentAdapter]] = None,
+         agents: List[Type[ScenarioAgentAdapter]] = [],
+         max_turns: Optional[int] = None,
+         verbose: Optional[Union[bool, int]] = None,
+         cache_key: Optional[str] = None,
+         debug: Optional[bool] = None,
+     ):
          """Validate scenario configuration after initialization."""

-         default_config = getattr(Scenario, "default_config", None)
+         config = ScenarioConfig(
+             testing_agent=testing_agent,
+             max_turns=max_turns,
+             verbose=verbose,
+             cache_key=cache_key,
+             debug=debug,
+         )
+
+         kwargs = config.items()
+         default_config: Optional[ScenarioConfig] = getattr(
+             Scenario, "default_config", None
+         )
          if default_config:
-             kwargs = {**default_config.model_dump(), **kwargs}
+             kwargs = default_config.merge(config).items()

          if not name:
              raise ValueError("Scenario name cannot be empty")
@@ -55,19 +88,48 @@ class Scenario(ScenarioConfig):
              raise ValueError("Scenario description cannot be empty")
          kwargs["description"] = description

-         # TODO: allow not having any criteria, for scripted scenarios
-         if not kwargs.get("criteria"):
-             raise ValueError("Scenario must have at least one criteria")
+         kwargs["criteria"] = criteria

-         if kwargs.get("max_turns", 0) < 1:
+         if kwargs.get("max_turns", 10) < 1:
              raise ValueError("max_turns must be a positive integer")

-         # Ensure agent is callable
-         if not callable(kwargs.get("agent")):
-             raise ValueError("Agent must be a callable function")
+         if not agents and not agent:
+             raise ValueError(
+                 "Missing required argument `agent`. Either `agent` or `agents` argument must be provided for the Scenario"
+             )
+
+         if not agents and not kwargs.get("testing_agent"):
+             raise Exception(default_config_error_message)
+
+         agents = agents or [
+             kwargs.get("testing_agent"),
+             agent, # type: ignore
+         ]
+
+         # Ensure each agent is a ScenarioAgentAdapter
+         for agent in agents:
+             if (
+                 not agent
+                 or not isinstance(agent, type)
+                 or not issubclass(agent, ScenarioAgentAdapter)
+             ):
+                 raise ValueError(message_invalid_agent_type(agent))
+         kwargs["agents"] = agents

          super().__init__(**kwargs)

+     def script(self, script: List[ScriptStep]):
+         class ScriptedScenario:
+             def __init__(self, scenario: "Scenario"):
+                 self._scenario = scenario
+
+             async def run(
+                 self, context: Optional[Dict[str, Any]] = None
+             ) -> ScenarioResult:
+                 return await self._scenario._run(context, script)
+
+         return ScriptedScenario(self)
+
      async def run(self, context: Optional[Dict[str, Any]] = None) -> ScenarioResult:
          """
          Run the scenario against the agent under test.
@@ -79,6 +141,13 @@ class Scenario(ScenarioConfig):
              ScenarioResult containing the test outcome
          """

+         return await self._run(context, None)
+
+     async def _run(
+         self,
+         context: Optional[Dict[str, Any]] = None,
+         script: Optional[List[ScriptStep]] = None,
+     ) -> ScenarioResult:
          # We'll use a thread pool to run the execution logic, we
          # require a separate thread because even though asyncio is
          # being used throughout, any user code on the callback can
@@ -90,7 +159,9 @@ class Scenario(ScenarioConfig):
              asyncio.set_event_loop(loop)

              try:
-                 return loop.run_until_complete(ScenarioExecutor(self).run(context))
+                 return loop.run_until_complete(
+                     ScenarioExecutor(self, context, script).run()
+                 )
              finally:
                  loop.close()

@@ -104,7 +175,7 @@ class Scenario(ScenarioConfig):
      @classmethod
      def configure(
          cls,
-         testing_agent: Optional[TestingAgent] = None,
+         testing_agent: Optional[Type[ScenarioAgentAdapter]] = None,
          max_turns: Optional[int] = None,
          verbose: Optional[Union[bool, int]] = None,
          cache_key: Optional[str] = None,
@@ -121,3 +192,47 @@ class Scenario(ScenarioConfig):
                  debug=debug,
              )
          )
+
+     # Scenario Scripting
+
+     def message(self, message: ChatCompletionMessageParam) -> ScriptStep:
+         return lambda state: state.message(message)
+
+     def user(
+         self, content: Optional[Union[str, ChatCompletionMessageParam]] = None
+     ) -> ScriptStep:
+         return lambda state: state.user(content)
+
+     def agent(
+         self, content: Optional[Union[str, ChatCompletionMessageParam]] = None
+     ) -> ScriptStep:
+         return lambda state: state.agent(content)
+
+     def judge(
+         self, content: Optional[Union[str, ChatCompletionMessageParam]] = None
+     ) -> ScriptStep:
+         return lambda state: state.judge(content)
+
+     def proceed(
+         self,
+         turns: Optional[int] = None,
+         on_turn: Optional[
+             Union[
+                 Callable[[ScenarioExecutor], None],
+                 Callable[[ScenarioExecutor], Awaitable[None]],
+             ]
+         ] = None,
+         on_step: Optional[
+             Union[
+                 Callable[[ScenarioExecutor], None],
+                 Callable[[ScenarioExecutor], Awaitable[None]],
+             ]
+         ] = None,
+     ) -> ScriptStep:
+         return lambda state: state.proceed(turns, on_turn, on_step)
+
+     def succeed(self) -> ScriptStep:
+         return lambda state: state.succeed()
+
+     def fail(self) -> ScriptStep:
+         return lambda state: state.fail()
@@ -0,0 +1,16 @@
+ from abc import ABC, abstractmethod
+ from typing import ClassVar, Set
+
+ from .types import AgentInput, AgentReturnTypes, ScenarioAgentRole
+
+
+ class ScenarioAgentAdapter(ABC):
+     roles: ClassVar[Set[ScenarioAgentRole]] = {ScenarioAgentRole.AGENT}
+
+     def __init__(self, input: AgentInput):
+         super().__init__()
+         pass
+
+     @abstractmethod
+     async def call(self, input: AgentInput) -> AgentReturnTypes:
+         pass
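
This final hunk is the new `scenario/scenario_agent_adapter.py` module listed in the RECORD above. For orientation, the README quickstart earlier in this diff shows how the abstract class is meant to be subclassed; a minimal sketch under those same assumptions follows (the wrapped `MyAgent` class and its `run` method are placeholders, not part of this package):

```python
from scenario import ScenarioAgentAdapter, AgentInput, AgentReturnTypes


class MyAgentAdapter(ScenarioAgentAdapter):
    def __init__(self, input: AgentInput):
        # Wrap whatever agent is under test; MyAgent is a hypothetical stand-in
        self.agent = MyAgent()

    async def call(self, input: AgentInput) -> AgentReturnTypes:
        # Per the error messages above, a string, an OpenAI-format message dict,
        # or a list of such messages are all accepted return values
        return self.agent.run(input.last_new_user_message_str())
```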